| prompt_id (int64, 0-941) | project (string, 24 classes) | module (string, 7-49 chars) | class (string, 0-32 chars) | method (string, 2-37 chars) | focal_method_txt (string, 43-41.5k chars) | focal_method_lines (list, length 2) | in_stack (bool, 2 classes) | globals (list, 0-16 items) | type_context (string, 79-41.9k chars) | has_branch (bool, 2 classes) | total_branches (int64, 0-3) |
|---|---|---|---|---|---|---|---|---|---|---|---|
0 |
apimd
|
apimd.loader
|
walk_packages
|
def walk_packages(name: str, path: str) -> Iterator[tuple[str, str]]:
"""Walk packages without import them."""
path = abspath(path) + sep
valid = (path + name, path + name + PEP561_SUFFIX)
for root, _, fs in walk(path):
for f in fs:
if not f.endswith(('.py', '.pyi')):
continue
f_path = parent(join(root, f))
if not f_path.startswith(valid):
continue
name = (f_path
.removeprefix(path)
.replace(PEP561_SUFFIX, "")
.replace(sep, '.')
.removesuffix('.__init__'))
yield name, f_path
|
[
43,
59
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"PEP561_SUFFIX"
] |
from typing import Optional
from collections.abc import Sequence, Iterator
from sys import path as sys_path
from os import mkdir, walk
from os.path import isdir, isfile, abspath, join, sep, dirname
from importlib.abc import Loader
from importlib.machinery import EXTENSION_SUFFIXES
from importlib.util import find_spec, spec_from_file_location, module_from_spec
from .logger import logger
from .parser import parent, Parser
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
PEP561_SUFFIX = '-stubs'
def walk_packages(name: str, path: str) -> Iterator[tuple[str, str]]:
"""Walk packages without import them."""
path = abspath(path) + sep
valid = (path + name, path + name + PEP561_SUFFIX)
for root, _, fs in walk(path):
for f in fs:
if not f.endswith(('.py', '.pyi')):
continue
f_path = parent(join(root, f))
if not f_path.startswith(valid):
continue
name = (f_path
.removeprefix(path)
.replace(PEP561_SUFFIX, "")
.replace(sep, '.')
.removesuffix('.__init__'))
yield name, f_path
| true | 2 |
|
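A minimal usage sketch for `walk_packages` (assuming `apimd` is installed; the package name `mypkg` and the `./src` layout are hypothetical). Judging from how `loader` re-appends extensions below, `parent()` strips the file suffix, so the yielded paths have no extension:

```python
# Hypothetical layout: ./src/mypkg/__init__.py, ./src/mypkg/core.py
from apimd.loader import walk_packages

for mod_name, mod_path in walk_packages("mypkg", "src"):
    # e.g. ("mypkg", "/abs/src/mypkg/__init__"), then ("mypkg.core", ...)
    print(mod_name, "->", mod_path)
```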
1 |
apimd
|
apimd.loader
|
loader
|
def loader(root: str, pwd: str, link: bool, level: int, toc: bool) -> str:
"""Package searching algorithm."""
p = Parser.new(link, level, toc)
for name, path in walk_packages(root, pwd):
# Load its source or stub
pure_py = False
for ext in [".py", ".pyi"]:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
p.parse(name, _read(path_ext))
if ext == ".py":
pure_py = True
if pure_py:
continue
logger.debug(f"loading extension module for fully documented:")
# Try to load module here
for ext in EXTENSION_SUFFIXES:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
if _load_module(name, path_ext, p):
break
else:
logger.warning(f"no module for {name} in this platform")
return p.compile()
|
[
78,
105
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"PEP561_SUFFIX"
] |
from typing import Optional
from collections.abc import Sequence, Iterator
from sys import path as sys_path
from os import mkdir, walk
from os.path import isdir, isfile, abspath, join, sep, dirname
from importlib.abc import Loader
from importlib.machinery import EXTENSION_SUFFIXES
from importlib.util import find_spec, spec_from_file_location, module_from_spec
from .logger import logger
from .parser import parent, Parser
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
PEP561_SUFFIX = '-stubs'
def loader(root: str, pwd: str, link: bool, level: int, toc: bool) -> str:
"""Package searching algorithm."""
p = Parser.new(link, level, toc)
for name, path in walk_packages(root, pwd):
# Load its source or stub
pure_py = False
for ext in [".py", ".pyi"]:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
p.parse(name, _read(path_ext))
if ext == ".py":
pure_py = True
if pure_py:
continue
logger.debug(f"loading extension module for fully documented:")
# Try to load module here
for ext in EXTENSION_SUFFIXES:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
if _load_module(name, path_ext, p):
break
else:
logger.warning(f"no module for {name} in this platform")
return p.compile()
| true | 2 |
|
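A usage sketch for the top-level `loader` entry point (same hypothetical `mypkg`/`./src` as above); it returns the compiled Markdown document as a string:

```python
from apimd.loader import loader

# Render Markdown API docs: links enabled, level-1 headings, no table of contents.
markdown = loader("mypkg", "src", link=True, level=1, toc=False)
print(markdown)
```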
2 |
apimd
|
apimd.parser
|
is_public_family
|
def is_public_family(name: str) -> bool:
"""Check the name is come from public modules or not."""
for n in name.split('.'):
# Magic name
if is_magic(n):
continue
# Local or private name
if n.startswith('_'):
return False
return True
|
[
61,
70
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def is_public_family(name: str) -> bool:
"""Check the name is come from public modules or not."""
for n in name.split('.'):
# Magic name
if is_magic(n):
continue
# Local or private name
if n.startswith('_'):
return False
return True
| true | 2 |
|
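A small sketch of the public/private rule (assuming `is_magic`, not shown above, accepts dunder names such as `__init__`):

```python
from apimd.parser import is_public_family

print(is_public_family("pkg.mod.func"))   # True
print(is_public_family("pkg._mod.func"))  # False: private component
print(is_public_family("pkg.__init__"))   # True: magic names are skipped
```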
3 |
apimd
|
apimd.parser
|
walk_body
|
def walk_body(body: Sequence[stmt]) -> Iterator[stmt]:
"""Traverse around body and its simple definition scope."""
for node in body:
if isinstance(node, If):
yield from walk_body(node.body)
yield from walk_body(node.orelse)
elif isinstance(node, Try):
yield from walk_body(node.body)
for h in node.handlers:
yield from walk_body(h.body)
yield from walk_body(node.orelse)
yield from walk_body(node.finalbody)
else:
yield node
|
[
73,
86
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def walk_body(body: Sequence[stmt]) -> Iterator[stmt]:
"""Traverse around body and its simple definition scope."""
for node in body:
if isinstance(node, If):
yield from walk_body(node.body)
yield from walk_body(node.orelse)
elif isinstance(node, Try):
yield from walk_body(node.body)
for h in node.handlers:
yield from walk_body(h.body)
yield from walk_body(node.orelse)
yield from walk_body(node.finalbody)
else:
yield node
| true | 2 |
|
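A sketch showing how `walk_body` flattens `if`/`try` wrappers and yields only the inner statements:

```python
import ast
from apimd.parser import walk_body

tree = ast.parse(
    "if True:\n"
    "    A = 1\n"
    "else:\n"
    "    B = 2\n"
    "try:\n"
    "    C = 3\n"
    "finally:\n"
    "    D = 4\n"
)
# The If/Try nodes themselves are not yielded, only their contents.
print([type(n).__name__ for n in walk_body(tree.body)])
# ['Assign', 'Assign', 'Assign', 'Assign']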
4 |
apimd
|
apimd.parser
|
esc_underscore
|
def esc_underscore(doc: str) -> str:
"""Escape underscore in names."""
if doc.count('_') > 1:
return doc.replace('_', r"\_")
else:
return doc
|
[
100,
105
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def esc_underscore(doc: str) -> str:
"""Escape underscore in names."""
if doc.count('_') > 1:
return doc.replace('_', r"\_")
else:
return doc
| true | 2 |
|
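A quick sketch of the escaping rule (only strings with more than one underscore are escaped):

```python
from apimd.parser import esc_underscore

print(esc_underscore("__init__"))    # \_\_init\_\_
print(esc_underscore("snake_case"))  # snake_case (single underscore kept)
```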
5 |
apimd
|
apimd.parser
|
doctest
|
def doctest(doc: str) -> str:
"""Wrap doctest as markdown Python code."""
keep = False
docs = []
lines = doc.splitlines()
for i, line in enumerate(lines):
signed = line.startswith(">>> ")
if signed:
if not keep:
docs.append("```python")
keep = True
elif keep:
docs.append("```")
keep = False
docs.append(line)
if signed and i == len(lines) - 1:
docs.append("```")
keep = False
return '\n'.join(docs)
|
[
108,
126
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def doctest(doc: str) -> str:
"""Wrap doctest as markdown Python code."""
keep = False
docs = []
lines = doc.splitlines()
for i, line in enumerate(lines):
signed = line.startswith(">>> ")
if signed:
if not keep:
docs.append("```python")
keep = True
elif keep:
docs.append("```")
keep = False
docs.append(line)
if signed and i == len(lines) - 1:
docs.append("```")
keep = False
return '\n'.join(docs)
| true | 2 |
|
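A sketch of `doctest` wrapping consecutive `>>>` lines, including the closing fence emitted on the last line:

```python
from apimd.parser import doctest

doc = ">>> 1 + 1\n>>> print('hi')"
# Both lines end up inside one fenced Python code block.
print(doctest(doc))
```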
6 |
apimd
|
apimd.parser
|
table
|
def table(*titles: str, items: Iterable[Union[str, Iterable[str]]]) -> str:
"""Create multi-column table with the titles.
Usage:
>>> table('a', 'b', [['c', 'd'], ['e', 'f']])
| a | b |
|:---:|:---:|
| c | d |
| e | f |
"""
return '\n'.join([_table_cell(titles), _table_split(titles),
'\n'.join(_table_cell([n] if isinstance(n, str) else n)
for n in items)]) + '\n\n'
|
[
140,
150
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def table(*titles: str, items: Iterable[Union[str, Iterable[str]]]) -> str:
"""Create multi-column table with the titles.
Usage:
>>> table('a', 'b', [['c', 'd'], ['e', 'f']])
| a | b |
|:---:|:---:|
| c | d |
| e | f |
"""
return '\n'.join([_table_cell(titles), _table_split(titles),
'\n'.join(_table_cell([n] if isinstance(n, str) else n)
for n in items)]) + '\n\n'
| false | 0 |
|
7 |
apimd
|
apimd.parser
|
const_type
|
def const_type(node: expr) -> str:
"""Constant type inference."""
if isinstance(node, Constant):
return _type_name(node.value)
elif isinstance(node, (Tuple, List, Set)):
return _type_name(node).lower() + _e_type(node.elts)
elif isinstance(node, Dict):
return 'dict' + _e_type(node.keys, node.values)
elif isinstance(node, Call) and isinstance(node.func, (Name, Attribute)):
func = unparse(node.func)
if func in chain({'bool', 'int', 'float', 'complex', 'str'},
PEP585.keys(), PEP585.values()):
return func
return ANY
|
[
181,
194
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def const_type(node: expr) -> str:
"""Constant type inference."""
if isinstance(node, Constant):
return _type_name(node.value)
elif isinstance(node, (Tuple, List, Set)):
return _type_name(node).lower() + _e_type(node.elts)
elif isinstance(node, Dict):
return 'dict' + _e_type(node.keys, node.values)
elif isinstance(node, Call) and isinstance(node.func, (Name, Attribute)):
func = unparse(node.func)
if func in chain({'bool', 'int', 'float', 'complex', 'str'},
PEP585.keys(), PEP585.values()):
return func
return ANY
| true | 2 |
|
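A sketch of the constant type inference (`_type_name` and `_e_type` are private helpers not shown above; the exact element-type formatting is assumed):

```python
import ast
from apimd.parser import const_type

print(const_type(ast.parse("42", mode="eval").body))        # int
print(const_type(ast.parse("(1, 'a')", mode="eval").body))  # e.g. tuple[int, str]
print(const_type(ast.parse("object()", mode="eval").body))  # Any (unknown call)
```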
8 |
apimd
|
apimd.parser
|
Resolver
|
visit_Constant
|
def visit_Constant(self, node: Constant) -> AST:
"""Check string is a name."""
if not isinstance(node.value, str):
return node
try:
e = cast(Expr, parse(node.value).body[0])
except SyntaxError:
return node
else:
return self.visit(e.value)
|
[
207,
216
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Constant(self, node: Constant) -> AST:
"""Check string is a name."""
if not isinstance(node.value, str):
return node
try:
e = cast(Expr, parse(node.value).body[0])
except SyntaxError:
return node
else:
return self.visit(e.value)
| true | 2 |
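A sketch of forward-reference resolution: a string constant is parsed and revisited as a real annotation node (hypothetical root `pkg`, empty alias map):

```python
import ast
from apimd.parser import Resolver

node = ast.Constant(value="dict[str, int]")
resolved = Resolver(root="pkg", alias={}).visit(node)
print(ast.unparse(resolved))  # dict[str, int] (now a Subscript, not a string)
```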
9 |
apimd
|
apimd.parser
|
Resolver
|
visit_Name
|
def visit_Name(self, node: Name) -> AST:
"""Replace global names with its expression recursively."""
if node.id == self.self_ty:
return Name("Self", Load())
name = _m(self.root, node.id)
if name in self.alias and name not in self.alias[name]:
e = cast(Expr, parse(self.alias[name]).body[0])
# Support `TypeVar`
if isinstance(e.value, Call) and isinstance(e.value.func, Name):
func_name = e.value.func.id
idf = self.alias.get(_m(self.root, func_name), func_name)
if idf == 'typing.TypeVar':
return node
return self.visit(e.value)
else:
return node
|
[
218,
233
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Name(self, node: Name) -> AST:
"""Replace global names with its expression recursively."""
if node.id == self.self_ty:
return Name("Self", Load())
name = _m(self.root, node.id)
if name in self.alias and name not in self.alias[name]:
e = cast(Expr, parse(self.alias[name]).body[0])
# Support `TypeVar`
if isinstance(e.value, Call) and isinstance(e.value.func, Name):
func_name = e.value.func.id
idf = self.alias.get(_m(self.root, func_name), func_name)
if idf == 'typing.TypeVar':
return node
return self.visit(e.value)
else:
return node
| true | 2 |
10 |
apimd
|
apimd.parser
|
Resolver
|
visit_Subscript
|
def visit_Subscript(self, node: Subscript) -> AST:
"""Implementation of PEP585 and PEP604."""
if not isinstance(node.value, Name):
return node
name = node.value.id
idf = self.alias.get(_m(self.root, name), name)
if idf == 'typing.Union':
if not isinstance(node.slice, Tuple):
return node.slice
b = node.slice.elts[0]
for e in node.slice.elts[1:]:
b = BinOp(b, BitOr(), e)
return b
elif idf == 'typing.Optional':
return BinOp(node.slice, BitOr(), Constant(None))
elif idf in PEP585:
        logger.warning(f"{node.lineno}:{node.col_offset}: "
                       f"found deprecated name {idf}, "
                       f"consider using {PEP585[idf]} instead")
        return Subscript(Name(PEP585[idf], Load()), node.slice, node.ctx)
else:
return node
|
[
235,
256
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Subscript(self, node: Subscript) -> AST:
"""Implementation of PEP585 and PEP604."""
if not isinstance(node.value, Name):
return node
name = node.value.id
idf = self.alias.get(_m(self.root, name), name)
if idf == 'typing.Union':
if not isinstance(node.slice, Tuple):
return node.slice
b = node.slice.elts[0]
for e in node.slice.elts[1:]:
b = BinOp(b, BitOr(), e)
return b
elif idf == 'typing.Optional':
return BinOp(node.slice, BitOr(), Constant(None))
elif idf in PEP585:
            logger.warning(f"{node.lineno}:{node.col_offset}: "
                           f"found deprecated name {idf}, "
                           f"consider using {PEP585[idf]} instead")
            return Subscript(Name(PEP585[idf], Load()), node.slice, node.ctx)
else:
return node
| true | 2 |
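A sketch of the PEP 604 rewriting (hypothetical root `pkg` with alias entries mimicking `from typing import Optional, Union`; requires Python 3.9+ for `ast.unparse`):

```python
import ast
from apimd.parser import Resolver

r = Resolver(root="pkg", alias={"pkg.Optional": "typing.Optional",
                                "pkg.Union": "typing.Union"})
print(ast.unparse(r.visit(ast.parse("Optional[int]", mode="eval").body)))
# int | None
print(ast.unparse(r.visit(ast.parse("Union[int, str]", mode="eval").body)))
# int | str
```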
11 |
apimd
|
apimd.parser
|
Resolver
|
visit_Attribute
|
def visit_Attribute(self, node: Attribute) -> AST:
"""Remove `typing.*` prefix of annotation."""
if not isinstance(node.value, Name):
return node
if node.value.id == 'typing':
return Name(node.attr, Load())
else:
return node
|
[
258,
265
] | false |
[
"__author__",
"__copyright__",
"__license__",
"__email__",
"_I",
"_G",
"_API",
"ANY"
] |
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Attribute(self, node: Attribute) -> AST:
"""Remove `typing.*` prefix of annotation."""
if not isinstance(node.value, Name):
return node
if node.value.id == 'typing':
return Name(node.attr, Load())
else:
return node
| true | 2 |
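A one-liner sketch of the prefix stripping:

```python
import ast
from apimd.parser import Resolver

node = ast.parse("typing.Sequence", mode="eval").body  # an Attribute node
print(ast.unparse(Resolver("pkg", {}).visit(node)))    # Sequence
```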
12 |
codetiming
|
codetiming._timers
|
Timers
|
apply
|
def apply(self, func: Callable[[List[float]], float], name: str) -> float:
"""Apply a function to the results of one named timer"""
if name in self._timings:
return func(self._timings[name])
raise KeyError(name)
|
[
41,
45
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def apply(self, func: Callable[[List[float]], float], name: str) -> float:
"""Apply a function to the results of one named timer"""
if name in self._timings:
return func(self._timings[name])
raise KeyError(name)
| true | 2 |
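A usage sketch covering `apply` and the statistics helpers in the rows below (it assumes the class's `add` method, part of `Timers` but not shown above, which records a new measurement for a named timer):

```python
import statistics
from codetiming._timers import Timers

timers = Timers()
timers.add("work", 0.10)  # `add` is assumed from the full class definition
timers.add("work", 0.30)
print(timers.apply(statistics.mean, name="work"))  # ~0.2
print(timers.min("work"), timers.max("work"))      # 0.1 0.3
print(timers.stdev("work"))                        # sample standard deviation
# timers.apply(max, name="missing") would raise KeyError('missing')
```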
13 |
codetiming
|
codetiming._timers
|
Timers
|
min
|
def min(self, name: str) -> float:
"""Minimal value of timings"""
return self.apply(lambda values: min(values or [0]), name=name)
|
[
55,
57
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def min(self, name: str) -> float:
"""Minimal value of timings"""
return self.apply(lambda values: min(values or [0]), name=name)
| false | 0 |
14 |
codetiming
|
codetiming._timers
|
Timers
|
max
|
def max(self, name: str) -> float:
"""Maximal value of timings"""
return self.apply(lambda values: max(values or [0]), name=name)
|
[
59,
61
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def max(self, name: str) -> float:
"""Maximal value of timings"""
return self.apply(lambda values: max(values or [0]), name=name)
| false | 0 |
15 |
codetiming
|
codetiming._timers
|
Timers
|
mean
|
def mean(self, name: str) -> float:
"""Mean value of timings"""
return self.apply(lambda values: statistics.mean(values or [0]), name=name)
|
[
63,
65
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def mean(self, name: str) -> float:
"""Mean value of timings"""
return self.apply(lambda values: statistics.mean(values or [0]), name=name)
| false | 0 |
16 |
codetiming
|
codetiming._timers
|
Timers
|
median
|
def median(self, name: str) -> float:
"""Median value of timings"""
return self.apply(lambda values: statistics.median(values or [0]), name=name)
|
[
67,
69
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def median(self, name: str) -> float:
"""Median value of timings"""
return self.apply(lambda values: statistics.median(values or [0]), name=name)
| false | 0 |
17 |
codetiming
|
codetiming._timers
|
Timers
|
stdev
|
def stdev(self, name: str) -> float:
"""Standard deviation of timings"""
if name in self._timings:
value = self._timings[name]
return statistics.stdev(value) if len(value) >= 2 else math.nan
raise KeyError(name)
|
[
71,
76
] | false |
[] |
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def stdev(self, name: str) -> float:
"""Standard deviation of timings"""
if name in self._timings:
value = self._timings[name]
return statistics.stdev(value) if len(value) >= 2 else math.nan
raise KeyError(name)
| true | 2 |
18 |
cookiecutter
|
cookiecutter.find
|
find_template
|
def find_template(repo_dir):
"""Determine which child directory of `repo_dir` is the project template.
:param repo_dir: Local directory of newly cloned repo.
:returns project_template: Relative path to project template.
"""
logger.debug('Searching %s for the project template.', repo_dir)
repo_dir_contents = os.listdir(repo_dir)
project_template = None
for item in repo_dir_contents:
if 'cookiecutter' in item and '{{' in item and '}}' in item:
project_template = item
break
if project_template:
project_template = os.path.join(repo_dir, project_template)
logger.debug('The project template appears to be %s', project_template)
return project_template
else:
raise NonTemplatedInputDirException
|
[
9,
30
] | false |
[
"logger"
] |
import logging
import os
from cookiecutter.exceptions import NonTemplatedInputDirException
logger = logging.getLogger(__name__)
def find_template(repo_dir):
"""Determine which child directory of `repo_dir` is the project template.
:param repo_dir: Local directory of newly cloned repo.
:returns project_template: Relative path to project template.
"""
logger.debug('Searching %s for the project template.', repo_dir)
repo_dir_contents = os.listdir(repo_dir)
project_template = None
for item in repo_dir_contents:
if 'cookiecutter' in item and '{{' in item and '}}' in item:
project_template = item
break
if project_template:
project_template = os.path.join(repo_dir, project_template)
logger.debug('The project template appears to be %s', project_template)
return project_template
else:
raise NonTemplatedInputDirException
| true | 2 |
|
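A sketch of the template-directory discovery; a directory qualifies when its name contains `cookiecutter` plus `{{` and `}}`, and `NonTemplatedInputDirException` is raised when nothing matches:

```python
import os
import tempfile
from cookiecutter.find import find_template

repo = tempfile.mkdtemp()
os.mkdir(os.path.join(repo, "{{cookiecutter.project_slug}}"))
print(find_template(repo))  # <repo>/{{cookiecutter.project_slug}}
```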
19 |
cookiecutter
|
cookiecutter.prompt
|
read_user_choice
|
def read_user_choice(var_name, options):
"""Prompt the user to choose from several options for the given variable.
The first item will be returned if no input happens.
:param str var_name: Variable as specified in the context
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(options, list):
raise TypeError
if not options:
raise ValueError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(options, 1)
)
choices = choice_map.keys()
default = '1'
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select {}:'.format(var_name),
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choices)),
)
)
user_choice = click.prompt(
prompt, type=click.Choice(choices), default=default, show_choices=False
)
return choice_map[user_choice]
|
[
43,
77
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def read_user_choice(var_name, options):
"""Prompt the user to choose from several options for the given variable.
The first item will be returned if no input happens.
:param str var_name: Variable as specified in the context
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(options, list):
raise TypeError
if not options:
raise ValueError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(options, 1)
)
choices = choice_map.keys()
default = '1'
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select {}:'.format(var_name),
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choices)),
)
)
user_choice = click.prompt(
prompt, type=click.Choice(choices), default=default, show_choices=False
)
return choice_map[user_choice]
| true | 2 |
|
20 |
cookiecutter
|
cookiecutter.prompt
|
process_json
|
def process_json(user_value):
"""Load user-supplied value as a JSON dict.
:param str user_value: User-supplied value to load as a JSON dict
"""
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception:
# Leave it up to click to ask the user again
raise click.UsageError('Unable to decode to JSON.')
if not isinstance(user_dict, dict):
# Leave it up to click to ask the user again
raise click.UsageError('Requires JSON dict.')
return user_dict
|
[
80,
95
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def process_json(user_value):
"""Load user-supplied value as a JSON dict.
:param str user_value: User-supplied value to load as a JSON dict
"""
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception:
# Leave it up to click to ask the user again
raise click.UsageError('Unable to decode to JSON.')
if not isinstance(user_dict, dict):
# Leave it up to click to ask the user again
raise click.UsageError('Requires JSON dict.')
return user_dict
| true | 2 |
|
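A sketch of the JSON validation behavior, including the non-dict rejection path:

```python
import click
from cookiecutter.prompt import process_json

print(process_json('{"a": [1, 2]}'))  # OrderedDict([('a', [1, 2])])
try:
    process_json('[1, 2]')            # valid JSON, but not a dict
except click.UsageError as err:
    print(err.message)                # Requires JSON dict.
```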
21 |
cookiecutter
|
cookiecutter.prompt
|
read_user_dict
|
def read_user_dict(var_name, default_value):
"""Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
default_display = 'default'
user_value = click.prompt(
var_name, default=default_display, type=click.STRING, value_proc=process_json
)
if user_value == default_display:
# Return the given default w/o any processing
return default_value
return user_value
|
[
98,
118
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def read_user_dict(var_name, default_value):
"""Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
default_display = 'default'
user_value = click.prompt(
var_name, default=default_display, type=click.STRING, value_proc=process_json
)
if user_value == default_display:
# Return the given default w/o any processing
return default_value
return user_value
| true | 2 |
|
22 |
cookiecutter
|
cookiecutter.prompt
|
render_variable
|
def render_variable(env, raw, cookiecutter_dict):
"""Render the next variable to be displayed in the user prompt.
Inside the prompting taken from the cookiecutter.json file, this renders
the next variable. For example, if a project_name is "Peanut Butter
Cookie", the repo_name could be be rendered with:
`{{ cookiecutter.project_name.replace(" ", "_") }}`.
This is then presented to the user as the default.
:param Environment env: A Jinja2 Environment object.
:param raw: The next value to be prompted for by the user.
:param dict cookiecutter_dict: The current context as it's gradually
being populated with variables.
:return: The rendered value for the default variable.
"""
if raw is None:
return None
elif isinstance(raw, dict):
return {
render_variable(env, k, cookiecutter_dict): render_variable(
env, v, cookiecutter_dict
)
for k, v in raw.items()
}
elif isinstance(raw, list):
return [render_variable(env, v, cookiecutter_dict) for v in raw]
elif not isinstance(raw, str):
raw = str(raw)
template = env.from_string(raw)
rendered_template = template.render(cookiecutter=cookiecutter_dict)
return rendered_template
|
[
121,
155
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def render_variable(env, raw, cookiecutter_dict):
"""Render the next variable to be displayed in the user prompt.
Inside the prompting taken from the cookiecutter.json file, this renders
the next variable. For example, if a project_name is "Peanut Butter
Cookie", the repo_name could be be rendered with:
`{{ cookiecutter.project_name.replace(" ", "_") }}`.
This is then presented to the user as the default.
:param Environment env: A Jinja2 Environment object.
:param raw: The next value to be prompted for by the user.
:param dict cookiecutter_dict: The current context as it's gradually
being populated with variables.
:return: The rendered value for the default variable.
"""
if raw is None:
return None
elif isinstance(raw, dict):
return {
render_variable(env, k, cookiecutter_dict): render_variable(
env, v, cookiecutter_dict
)
for k, v in raw.items()
}
elif isinstance(raw, list):
return [render_variable(env, v, cookiecutter_dict) for v in raw]
elif not isinstance(raw, str):
raw = str(raw)
template = env.from_string(raw)
rendered_template = template.render(cookiecutter=cookiecutter_dict)
return rendered_template
| true | 2 |
|
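A sketch of rendering a default value against a partially built context (mirrors the docstring's `project_name` example):

```python
from cookiecutter.environment import StrictEnvironment
from cookiecutter.prompt import render_variable

ctx = {"project_name": "Peanut Butter Cookie"}
env = StrictEnvironment(context={"cookiecutter": ctx})
default = render_variable(
    env, "{{ cookiecutter.project_name.lower().replace(' ', '_') }}", ctx
)
print(default)  # peanut_butter_cookie
```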
23 |
cookiecutter
|
cookiecutter.prompt
|
prompt_choice_for_config
|
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
"""Prompt user with a set of options to choose from.
Each of the possible choices is rendered beforehand.
"""
rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]
if no_input:
return rendered_options[0]
return read_user_choice(key, rendered_options)
|
[
158,
167
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
"""Prompt user with a set of options to choose from.
Each of the possible choices is rendered beforehand.
"""
rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]
if no_input:
return rendered_options[0]
return read_user_choice(key, rendered_options)
| true | 2 |
|
24 |
cookiecutter
|
cookiecutter.prompt
|
prompt_for_config
|
def prompt_for_config(context, no_input=False):
"""Prompt user to enter a new config.
:param dict context: Source for field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = OrderedDict([])
env = StrictEnvironment(context=context)
# First pass: Handle simple and raw variables, plus choices.
# These must be done first because the dictionaries keys and
# values might refer to them.
for key, raw in context['cookiecutter'].items():
if key.startswith('_') and not key.startswith('__'):
cookiecutter_dict[key] = raw
continue
elif key.startswith('__'):
cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
continue
try:
if isinstance(raw, list):
# We are dealing with a choice variable
val = prompt_choice_for_config(
cookiecutter_dict, env, key, raw, no_input
)
cookiecutter_dict[key] = val
elif not isinstance(raw, dict):
# We are dealing with a regular variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input:
val = read_user_variable(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
# Second pass; handle the dictionaries.
for key, raw in context['cookiecutter'].items():
        # Skip private type dicts not to be rendered.
if key.startswith('_') and not key.startswith('__'):
continue
try:
if isinstance(raw, dict):
# We are dealing with a dict variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input and not key.startswith('__'):
val = read_user_dict(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
return cookiecutter_dict
|
[
170,
228
] | false |
[] |
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def prompt_for_config(context, no_input=False):
"""Prompt user to enter a new config.
:param dict context: Source for field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = OrderedDict([])
env = StrictEnvironment(context=context)
# First pass: Handle simple and raw variables, plus choices.
# These must be done first because the dictionaries keys and
# values might refer to them.
for key, raw in context['cookiecutter'].items():
if key.startswith('_') and not key.startswith('__'):
cookiecutter_dict[key] = raw
continue
elif key.startswith('__'):
cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
continue
try:
if isinstance(raw, list):
# We are dealing with a choice variable
val = prompt_choice_for_config(
cookiecutter_dict, env, key, raw, no_input
)
cookiecutter_dict[key] = val
elif not isinstance(raw, dict):
# We are dealing with a regular variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input:
val = read_user_variable(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
# Second pass; handle the dictionaries.
for key, raw in context['cookiecutter'].items():
        # Skip private type dicts not to be rendered.
if key.startswith('_') and not key.startswith('__'):
continue
try:
if isinstance(raw, dict):
# We are dealing with a dict variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input and not key.startswith('__'):
val = read_user_dict(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
return cookiecutter_dict
| true | 2 |
|
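A non-interactive sketch of the two-pass prompting: with `no_input=True`, choice variables take their first option, templated defaults are rendered, and private keys are copied verbatim:

```python
from cookiecutter.prompt import prompt_for_config

context = {"cookiecutter": {
    "project_name": "Demo",
    "slug": "{{ cookiecutter.project_name.lower() }}",  # rendered from above
    "license": ["MIT", "BSD-3"],   # choice variable -> first option wins
    "_copy_me": "kept verbatim",   # private key: copied without rendering
}}
print(prompt_for_config(context, no_input=True))
# OrderedDict([('project_name', 'Demo'), ('slug', 'demo'),
#              ('license', 'MIT'), ('_copy_me', 'kept verbatim')])
```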
25 |
cookiecutter
|
cookiecutter.replay
|
get_file_name
|
def get_file_name(replay_dir, template_name):
"""Get the name of file."""
suffix = '.json' if not template_name.endswith('.json') else ''
file_name = '{}{}'.format(template_name, suffix)
return os.path.join(replay_dir, file_name)
|
[
11,
15
] | false |
[] |
import json
import os
from cookiecutter.utils import make_sure_path_exists
def get_file_name(replay_dir, template_name):
"""Get the name of file."""
suffix = '.json' if not template_name.endswith('.json') else ''
file_name = '{}{}'.format(template_name, suffix)
return os.path.join(replay_dir, file_name)
| false | 0 |
|
26 |
cookiecutter
|
cookiecutter.replay
|
dump
|
def dump(replay_dir, template_name, context):
"""Write json data to file."""
if not make_sure_path_exists(replay_dir):
raise IOError('Unable to create replay dir at {}'.format(replay_dir))
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
if not isinstance(context, dict):
raise TypeError('Context is required to be of type dict')
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'w') as outfile:
json.dump(context, outfile, indent=2)
|
[
18,
35
] | false |
[] |
import json
import os
from cookiecutter.utils import make_sure_path_exists
def dump(replay_dir, template_name, context):
"""Write json data to file."""
if not make_sure_path_exists(replay_dir):
raise IOError('Unable to create replay dir at {}'.format(replay_dir))
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
if not isinstance(context, dict):
raise TypeError('Context is required to be of type dict')
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'w') as outfile:
json.dump(context, outfile, indent=2)
| true | 2 |
|
27 |
cookiecutter
|
cookiecutter.replay
|
load
|
def load(replay_dir, template_name):
"""Read json data from file."""
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'r') as infile:
context = json.load(infile)
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
return context
|
[
38,
51
] | false |
[] |
import json
import os
from cookiecutter.utils import make_sure_path_exists
def load(replay_dir, template_name):
"""Read json data from file."""
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'r') as infile:
context = json.load(infile)
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
return context
| true | 2 |
|
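A round-trip sketch for `dump` and `load` (hypothetical template name; `dump` appends `.json` via `get_file_name`):

```python
import tempfile
from cookiecutter import replay

replay_dir = tempfile.mkdtemp()
context = {"cookiecutter": {"project_name": "Demo"}}
replay.dump(replay_dir, "demo-template", context)  # writes demo-template.json
assert replay.load(replay_dir, "demo-template") == context
```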
28 |
cookiecutter
|
cookiecutter.repository
|
expand_abbreviations
|
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
|
[
30,
45
] | false |
[
"REPO_REGEX"
] |
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
| true | 2 |
|
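A sketch of abbreviation expansion with a GitHub-style prefix (the `gh` mapping mirrors cookiecutter's built-in abbreviations):

```python
from cookiecutter.repository import expand_abbreviations

abbreviations = {"gh": "https://github.com/{0}.git"}
print(expand_abbreviations("gh:audreyfeldroy/cookiecutter-pypackage",
                           abbreviations))
# https://github.com/audreyfeldroy/cookiecutter-pypackage.git
print(expand_abbreviations("./local-template", abbreviations))  # unchanged
```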
29 |
cookiecutter
|
cookiecutter.repository
|
repository_has_cookiecutter_json
|
def repository_has_cookiecutter_json(repo_directory):
"""Determine if `repo_directory` contains a `cookiecutter.json` file.
:param repo_directory: The candidate repository directory.
:return: True if the `repo_directory` is valid, else False.
"""
repo_directory_exists = os.path.isdir(repo_directory)
repo_config_exists = os.path.isfile(
os.path.join(repo_directory, 'cookiecutter.json')
)
return repo_directory_exists and repo_config_exists
|
[
48,
59
] | false |
[
"REPO_REGEX"
] |
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def repository_has_cookiecutter_json(repo_directory):
"""Determine if `repo_directory` contains a `cookiecutter.json` file.
:param repo_directory: The candidate repository directory.
:return: True if the `repo_directory` is valid, else False.
"""
repo_directory_exists = os.path.isdir(repo_directory)
repo_config_exists = os.path.isfile(
os.path.join(repo_directory, 'cookiecutter.json')
)
return repo_directory_exists and repo_config_exists
| false | 0 |
|
30 |
cookiecutter
|
cookiecutter.repository
|
determine_repo_dir
|
def determine_repo_dir(
template,
abbreviations,
clone_to_dir,
checkout,
no_input,
password=None,
directory=None,
):
"""
Locate the repository directory from a template reference.
Applies repository abbreviations to the template reference.
If the template refers to a repository URL, clone it.
If the template is a path to a local repository, use it.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param abbreviations: A dictionary of repository abbreviation
definitions.
:param clone_to_dir: The directory to clone the repository into.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param password: The password to use when extracting the repository.
:param directory: Directory within repo where cookiecutter.json lives.
:return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
after the template has been instantiated.
:raises: `RepositoryNotFound` if a repository directory could not be found.
"""
template = expand_abbreviations(template, abbreviations)
if is_zip_file(template):
unzipped_dir = unzip(
zip_uri=template,
is_url=is_repo_url(template),
clone_to_dir=clone_to_dir,
no_input=no_input,
password=password,
)
repository_candidates = [unzipped_dir]
cleanup = True
elif is_repo_url(template):
cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
repository_candidates = [cloned_repo]
cleanup = False
else:
repository_candidates = [template, os.path.join(clone_to_dir, template)]
cleanup = False
if directory:
repository_candidates = [
os.path.join(s, directory) for s in repository_candidates
]
for repo_candidate in repository_candidates:
if repository_has_cookiecutter_json(repo_candidate):
return repo_candidate, cleanup
raise RepositoryNotFound(
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(template, '\n'.join(repository_candidates))
)
|
[
62,
126
] | false |
[
"REPO_REGEX"
] |
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def determine_repo_dir(
template,
abbreviations,
clone_to_dir,
checkout,
no_input,
password=None,
directory=None,
):
"""
Locate the repository directory from a template reference.
Applies repository abbreviations to the template reference.
If the template refers to a repository URL, clone it.
If the template is a path to a local repository, use it.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param abbreviations: A dictionary of repository abbreviation
definitions.
:param clone_to_dir: The directory to clone the repository into.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param password: The password to use when extracting the repository.
:param directory: Directory within repo where cookiecutter.json lives.
:return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
after the template has been instantiated.
:raises: `RepositoryNotFound` if a repository directory could not be found.
"""
template = expand_abbreviations(template, abbreviations)
if is_zip_file(template):
unzipped_dir = unzip(
zip_uri=template,
is_url=is_repo_url(template),
clone_to_dir=clone_to_dir,
no_input=no_input,
password=password,
)
repository_candidates = [unzipped_dir]
cleanup = True
elif is_repo_url(template):
cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
repository_candidates = [cloned_repo]
cleanup = False
else:
repository_candidates = [template, os.path.join(clone_to_dir, template)]
cleanup = False
if directory:
repository_candidates = [
os.path.join(s, directory) for s in repository_candidates
]
for repo_candidate in repository_candidates:
if repository_has_cookiecutter_json(repo_candidate):
return repo_candidate, cleanup
raise RepositoryNotFound(
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(template, '\n'.join(repository_candidates))
)
| true | 2 |
|
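A hedged usage sketch (requires network access and git; the abbreviation and target directory are illustrative):

```python
from cookiecutter.repository import determine_repo_dir

repo_dir, cleanup = determine_repo_dir(
    template="gh:audreyfeldroy/cookiecutter-pypackage",  # expanded first
    abbreviations={"gh": "https://github.com/{0}.git"},
    clone_to_dir="/tmp/cookiecutters",
    checkout=None,
    no_input=True,
)
# repo_dir points at a checkout containing cookiecutter.json;
# cleanup is False for clones, True only for unzipped archives.
```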
31 |
cookiecutter
|
cookiecutter.zipfile
|
unzip
|
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
"""Download and unpack a zipfile at a given URI.
This will download the zipfile to the cookiecutter repository,
and unpack into a temporary directory.
:param zip_uri: The URI for the zipfile.
:param is_url: Is the zip URI a URL or a file?
:param clone_to_dir: The cookiecutter repository directory
to put the archive into.
:param no_input: Suppress any prompts
:param password: The password to use when unpacking the repository.
"""
# Ensure that clone_to_dir exists
clone_to_dir = os.path.expanduser(clone_to_dir)
make_sure_path_exists(clone_to_dir)
if is_url:
# Build the name of the cached zipfile,
# and prompt to delete if it already exists.
identifier = zip_uri.rsplit('/', 1)[1]
zip_path = os.path.join(clone_to_dir, identifier)
if os.path.exists(zip_path):
download = prompt_and_delete(zip_path, no_input=no_input)
else:
download = True
if download:
# (Re) download the zipfile
r = requests.get(zip_uri, stream=True)
with open(zip_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
# Just use the local zipfile as-is.
zip_path = os.path.abspath(zip_uri)
# Now unpack the repository. The zipfile will be unpacked
# into a temporary directory
try:
zip_file = ZipFile(zip_path)
if len(zip_file.namelist()) == 0:
raise InvalidZipRepository('Zip repository {} is empty'.format(zip_uri))
# The first record in the zipfile should be the directory entry for
# the archive. If it isn't a directory, there's a problem.
first_filename = zip_file.namelist()[0]
if not first_filename.endswith('/'):
raise InvalidZipRepository(
'Zip repository {} does not include '
'a top-level directory'.format(zip_uri)
)
# Construct the final target directory
project_name = first_filename[:-1]
unzip_base = tempfile.mkdtemp()
unzip_path = os.path.join(unzip_base, project_name)
# Extract the zip file into the temporary directory
try:
zip_file.extractall(path=unzip_base)
except RuntimeError:
# File is password protected; try to get a password from the
# environment; if that doesn't work, ask the user.
if password is not None:
try:
zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8'))
except RuntimeError:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
elif no_input:
raise InvalidZipRepository(
'Unable to unlock password protected repository'
)
else:
retry = 0
while retry is not None:
try:
password = read_repo_password('Repo password')
zip_file.extractall(
path=unzip_base, pwd=password.encode('utf-8')
)
retry = None
except RuntimeError:
retry += 1
if retry == 3:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
except BadZipFile:
raise InvalidZipRepository(
'Zip repository {} is not a valid zip archive:'.format(zip_uri)
)
return unzip_path
|
[
12,
111
] | false |
[] |
import os
import tempfile
from zipfile import BadZipFile, ZipFile
import requests
from cookiecutter.exceptions import InvalidZipRepository
from cookiecutter.prompt import read_repo_password
from cookiecutter.utils import make_sure_path_exists, prompt_and_delete
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
"""Download and unpack a zipfile at a given URI.
This will download the zipfile to the cookiecutter repository,
and unpack into a temporary directory.
:param zip_uri: The URI for the zipfile.
:param is_url: Is the zip URI a URL or a file?
:param clone_to_dir: The cookiecutter repository directory
to put the archive into.
:param no_input: Suppress any prompts
:param password: The password to use when unpacking the repository.
"""
# Ensure that clone_to_dir exists
clone_to_dir = os.path.expanduser(clone_to_dir)
make_sure_path_exists(clone_to_dir)
if is_url:
# Build the name of the cached zipfile,
# and prompt to delete if it already exists.
identifier = zip_uri.rsplit('/', 1)[1]
zip_path = os.path.join(clone_to_dir, identifier)
if os.path.exists(zip_path):
download = prompt_and_delete(zip_path, no_input=no_input)
else:
download = True
if download:
# (Re) download the zipfile
r = requests.get(zip_uri, stream=True)
with open(zip_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
# Just use the local zipfile as-is.
zip_path = os.path.abspath(zip_uri)
# Now unpack the repository. The zipfile will be unpacked
# into a temporary directory
try:
zip_file = ZipFile(zip_path)
if len(zip_file.namelist()) == 0:
raise InvalidZipRepository('Zip repository {} is empty'.format(zip_uri))
# The first record in the zipfile should be the directory entry for
# the archive. If it isn't a directory, there's a problem.
first_filename = zip_file.namelist()[0]
if not first_filename.endswith('/'):
raise InvalidZipRepository(
'Zip repository {} does not include '
'a top-level directory'.format(zip_uri)
)
# Construct the final target directory
project_name = first_filename[:-1]
unzip_base = tempfile.mkdtemp()
unzip_path = os.path.join(unzip_base, project_name)
# Extract the zip file into the temporary directory
try:
zip_file.extractall(path=unzip_base)
except RuntimeError:
# File is password protected; try to get a password from the
# environment; if that doesn't work, ask the user.
if password is not None:
try:
zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8'))
except RuntimeError:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
elif no_input:
raise InvalidZipRepository(
'Unable to unlock password protected repository'
)
else:
retry = 0
while retry is not None:
try:
password = read_repo_password('Repo password')
zip_file.extractall(
path=unzip_base, pwd=password.encode('utf-8')
)
retry = None
except RuntimeError:
retry += 1
if retry == 3:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
except BadZipFile:
raise InvalidZipRepository(
'Zip repository {} is not a valid zip archive:'.format(zip_uri)
)
return unzip_path
| true | 2 |
|
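A minimal usage sketch for the unzip function above (illustrative, not part of the dataset): the URL is hypothetical, and the module path cookiecutter.zipfile is an assumption inferred from the imports shown.

from cookiecutter.zipfile import unzip  # assumed module path

# Download a (hypothetical) template archive and unpack it; the return
# value is the unpacked top-level directory inside a fresh temp dir.
template_dir = unzip(
    'https://example.com/templates/mytemplate.zip',
    is_url=True,
    clone_to_dir='~/.cookiecutters',
    no_input=True,
)
print(template_dir)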
32 |
dataclasses_json
|
dataclasses_json.cfg
|
config
|
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[str, T], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
|
[
43,
96
] | false |
[
"T",
"global_config"
] |
import functools
from typing import Callable, Dict, Optional, TypeVar, Union
from marshmallow.fields import Field as MarshmallowField
from dataclasses_json.undefined import Undefined, UndefinedParameterError
T = TypeVar("T")
global_config = _GlobalConfig()
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[str, T], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
| true | 2 |
|
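A short sketch of attaching config() output to a dataclass field (assumes the package's public dataclass_json decorator and to_json/from_json mixin methods, which are not shown in this record):

from dataclasses import dataclass, field
from dataclasses_json import config, dataclass_json

@dataclass_json
@dataclass
class Person:
    # Serialize under the JSON key "FirstName" and upper-case on encode.
    name: str = field(
        metadata=config(field_name='FirstName', encoder=str.upper)
    )

print(Person('ada').to_json())                   # {"FirstName": "ADA"}
print(Person.from_json('{"FirstName": "Ada"}'))  # Person(name='Ada')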
33 |
dataclasses_json
|
dataclasses_json.core
|
_ExtendedEncoder
|
default
|
def default(self, o) -> Json:
result: Json
if _isinstance_safe(o, Collection):
if _isinstance_safe(o, Mapping):
result = dict(o)
else:
result = list(o)
elif _isinstance_safe(o, datetime):
result = o.timestamp()
elif _isinstance_safe(o, UUID):
result = str(o)
elif _isinstance_safe(o, Enum):
result = o.value
elif _isinstance_safe(o, Decimal):
result = str(o)
else:
result = json.JSONEncoder.default(self, o)
return result
|
[
32,
49
] | false |
[
"Json",
"confs",
"FieldOverride"
] |
import copy
import json
import warnings
from collections import defaultdict, namedtuple
from dataclasses import (MISSING,
_is_dataclass_instance,
fields,
is_dataclass # type: ignore
)
from datetime import datetime, timezone
from decimal import Decimal
from enum import Enum
from typing import Any, Collection, Mapping, Union, get_type_hints
from uuid import UUID
from typing_inspect import is_union_type
from dataclasses_json import cfg
from dataclasses_json.utils import (_get_type_cons,
_handle_undefined_parameters_safe,
_is_collection, _is_mapping, _is_new_type,
_is_optional, _isinstance_safe,
_issubclass_safe)
Json = Union[dict, list, str, int, float, bool, None]
confs = ['encoder', 'decoder', 'mm_field', 'letter_case', 'exclude']
FieldOverride = namedtuple('FieldOverride', confs)
class _ExtendedEncoder(json.JSONEncoder):
def default(self, o) -> Json:
result: Json
if _isinstance_safe(o, Collection):
if _isinstance_safe(o, Mapping):
result = dict(o)
else:
result = list(o)
elif _isinstance_safe(o, datetime):
result = o.timestamp()
elif _isinstance_safe(o, UUID):
result = str(o)
elif _isinstance_safe(o, Enum):
result = o.value
elif _isinstance_safe(o, Decimal):
result = str(o)
else:
result = json.JSONEncoder.default(self, o)
return result
| true | 2 |
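A quick sketch of the fallback chain above, driven through json.dumps (note that _ExtendedEncoder is a private class; importing it directly is for illustration only):

import json
from datetime import datetime, timezone
from decimal import Decimal
from uuid import uuid4

from dataclasses_json.core import _ExtendedEncoder

payload = {
    'when': datetime(2021, 1, 1, tzinfo=timezone.utc),  # datetime -> timestamp
    'ident': uuid4(),                                   # UUID -> str
    'price': Decimal('9.99'),                           # Decimal -> str
    'tags': {'a', 'b'},                                 # Collection -> list
}
print(json.dumps(payload, cls=_ExtendedEncoder))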
34 |
dataclasses_json
|
dataclasses_json.mm
|
build_type
|
def build_type(type_, options, mixin, field, cls):
def inner(type_, options):
while True:
if not _is_new_type(type_):
break
type_ = type_.__supertype__
if is_dataclass(type_):
if _issubclass_safe(type_, mixin):
options['field_many'] = bool(
_is_supported_generic(field.type) and _is_collection(
field.type))
return fields.Nested(type_.schema(), **options)
else:
warnings.warn(f"Nested dataclass field {field.name} of type "
f"{field.type} detected in "
f"{cls.__name__} that is not an instance of "
f"dataclass_json. Did you mean to recursively "
f"serialize this field? If so, make sure to "
f"augment {type_} with either the "
f"`dataclass_json` decorator or mixin.")
return fields.Field(**options)
origin = getattr(type_, '__origin__', type_)
args = [inner(a, {}) for a in getattr(type_, '__args__', []) if
a is not type(None)]
if _is_optional(type_):
options["allow_none"] = True
if origin in TYPES:
return TYPES[origin](*args, **options)
if _issubclass_safe(origin, Enum):
return EnumField(enum=origin, by_value=True, *args, **options)
if is_union_type(type_):
union_types = [a for a in getattr(type_, '__args__', []) if
a is not type(None)]
union_desc = dict(zip(union_types, args))
return _UnionField(union_desc, cls, field, **options)
warnings.warn(
f"Unknown type {type_} at {cls.__name__}.{field.name}: {field.type} "
f"It's advised to pass the correct marshmallow type to `mm_field`.")
return fields.Field(**options)
return inner(type_, options)
|
[
226,
274
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
class _UnionField(fields.Field):
def __init__(self, desc, cls, field, *args, **kwargs):
self.desc = desc
self.cls = cls
self.field = field
super().__init__(*args, **kwargs)
def build_type(type_, options, mixin, field, cls):
def inner(type_, options):
while True:
if not _is_new_type(type_):
break
type_ = type_.__supertype__
if is_dataclass(type_):
if _issubclass_safe(type_, mixin):
options['field_many'] = bool(
_is_supported_generic(field.type) and _is_collection(
field.type))
return fields.Nested(type_.schema(), **options)
else:
warnings.warn(f"Nested dataclass field {field.name} of type "
f"{field.type} detected in "
f"{cls.__name__} that is not an instance of "
f"dataclass_json. Did you mean to recursively "
f"serialize this field? If so, make sure to "
f"augment {type_} with either the "
f"`dataclass_json` decorator or mixin.")
return fields.Field(**options)
origin = getattr(type_, '__origin__', type_)
args = [inner(a, {}) for a in getattr(type_, '__args__', []) if
a is not type(None)]
if _is_optional(type_):
options["allow_none"] = True
if origin in TYPES:
return TYPES[origin](*args, **options)
if _issubclass_safe(origin, Enum):
return EnumField(enum=origin, by_value=True, *args, **options)
if is_union_type(type_):
union_types = [a for a in getattr(type_, '__args__', []) if
a is not type(None)]
union_desc = dict(zip(union_types, args))
return _UnionField(union_desc, cls, field, **options)
warnings.warn(
f"Unknown type {type_} at {cls.__name__}.{field.name}: {field.type} "
f"It's advised to pass the correct marshmallow type to `mm_field`.")
return fields.Field(**options)
return inner(type_, options)
| true | 2 |
|
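The origin/args extraction above can be seen in isolation with just the standard library (a sketch):

import typing

t = typing.Optional[typing.List[int]]
origin = getattr(t, '__origin__', t)  # typing.Union, since Optional is a Union
args = [a for a in getattr(t, '__args__', ()) if a is not type(None)]
print(origin)  # typing.Union
print(args)    # [typing.List[int]], which build_type recurses into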
35 |
dataclasses_json
|
dataclasses_json.mm
|
schema
|
def schema(cls, mixin, infer_missing):
schema = {}
overrides = _user_overrides_or_exts(cls)
# TODO check the undefined parameters and add the proper schema action
# https://marshmallow.readthedocs.io/en/stable/quickstart.html
for field in dc_fields(cls):
        # The per-field metadata has already been folded into the class-level
        # overrides, so read the override entry directly.
        metadata = overrides[field.name]
if metadata.mm_field is not None:
schema[field.name] = metadata.mm_field
else:
type_ = field.type
options = {}
missing_key = 'missing' if infer_missing else 'default'
if field.default is not MISSING:
options[missing_key] = field.default
elif field.default_factory is not MISSING:
options[missing_key] = field.default_factory
if options.get(missing_key, ...) is None:
options['allow_none'] = True
if _is_optional(type_):
options.setdefault(missing_key, None)
options['allow_none'] = True
if len(type_.__args__) == 2:
                    # Only unwrap a plain Optional[X] (exactly one typed member
                    # plus None); Union[str, int, None] is optional too but
                    # keeps its Union type.
type_ = type_.__args__[0]
if metadata.letter_case is not None:
options['data_key'] = metadata.letter_case(field.name)
t = build_type(type_, options, mixin, field, cls)
# if type(t) is not fields.Field: # If we use `isinstance` we would return nothing.
if field.type != typing.Optional[CatchAllVar]:
schema[field.name] = t
return schema
|
[
277,
314
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
def schema(cls, mixin, infer_missing):
schema = {}
overrides = _user_overrides_or_exts(cls)
# TODO check the undefined parameters and add the proper schema action
# https://marshmallow.readthedocs.io/en/stable/quickstart.html
for field in dc_fields(cls):
        # The per-field metadata has already been folded into the class-level
        # overrides, so read the override entry directly.
        metadata = overrides[field.name]
if metadata.mm_field is not None:
schema[field.name] = metadata.mm_field
else:
type_ = field.type
options = {}
missing_key = 'missing' if infer_missing else 'default'
if field.default is not MISSING:
options[missing_key] = field.default
elif field.default_factory is not MISSING:
options[missing_key] = field.default_factory
if options.get(missing_key, ...) is None:
options['allow_none'] = True
if _is_optional(type_):
options.setdefault(missing_key, None)
options['allow_none'] = True
if len(type_.__args__) == 2:
                    # Only unwrap a plain Optional[X] (exactly one typed member
                    # plus None); Union[str, int, None] is optional too but
                    # keeps its Union type.
type_ = type_.__args__[0]
if metadata.letter_case is not None:
options['data_key'] = metadata.letter_case(field.name)
t = build_type(type_, options, mixin, field, cls)
# if type(t) is not fields.Field: # If we use `isinstance` we would return nothing.
if field.type != typing.Optional[CatchAllVar]:
schema[field.name] = t
return schema
| true | 2 |
|
36 |
dataclasses_json
|
dataclasses_json.mm
|
build_schema
|
def build_schema(cls: typing.Type[A],
mixin,
infer_missing,
partial) -> typing.Type[SchemaType]:
Meta = type('Meta',
(),
{'fields': tuple(field.name for field in dc_fields(cls)
if
field.name != 'dataclass_json_config' and field.type !=
typing.Optional[CatchAllVar]),
# TODO #180
# 'render_module': global_config.json_module
})
@post_load
def make_instance(self, kvs, **kwargs):
return _decode_dataclass(cls, kvs, partial)
def dumps(self, *args, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _ExtendedEncoder
return Schema.dumps(self, *args, **kwargs)
def dump(self, obj, *, many=None):
dumped = Schema.dump(self, obj, many=many)
# TODO This is hacky, but the other option I can think of is to generate a different schema
# depending on dump and load, which is even more hacky
# The only problem is the catch all field, we can't statically create a schema for it
# so we just update the dumped dict
if many:
for i, _obj in enumerate(obj):
dumped[i].update(
_handle_undefined_parameters_safe(cls=_obj, kvs={},
usage="dump"))
else:
dumped.update(_handle_undefined_parameters_safe(cls=obj, kvs={},
usage="dump"))
return dumped
schema_ = schema(cls, mixin, infer_missing)
DataClassSchema: typing.Type[SchemaType] = type(
f'{cls.__name__.capitalize()}Schema',
(Schema,),
{'Meta': Meta,
f'make_{cls.__name__.lower()}': make_instance,
'dumps': dumps,
'dump': dump,
**schema_})
return DataClassSchema
|
[
317,
368
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
def build_schema(cls: typing.Type[A],
mixin,
infer_missing,
partial) -> typing.Type[SchemaType]:
Meta = type('Meta',
(),
{'fields': tuple(field.name for field in dc_fields(cls)
if
field.name != 'dataclass_json_config' and field.type !=
typing.Optional[CatchAllVar]),
# TODO #180
# 'render_module': global_config.json_module
})
@post_load
def make_instance(self, kvs, **kwargs):
return _decode_dataclass(cls, kvs, partial)
def dumps(self, *args, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _ExtendedEncoder
return Schema.dumps(self, *args, **kwargs)
def dump(self, obj, *, many=None):
dumped = Schema.dump(self, obj, many=many)
# TODO This is hacky, but the other option I can think of is to generate a different schema
# depending on dump and load, which is even more hacky
# The only problem is the catch all field, we can't statically create a schema for it
# so we just update the dumped dict
if many:
for i, _obj in enumerate(obj):
dumped[i].update(
_handle_undefined_parameters_safe(cls=_obj, kvs={},
usage="dump"))
else:
dumped.update(_handle_undefined_parameters_safe(cls=obj, kvs={},
usage="dump"))
return dumped
schema_ = schema(cls, mixin, infer_missing)
DataClassSchema: typing.Type[SchemaType] = type(
f'{cls.__name__.capitalize()}Schema',
(Schema,),
{'Meta': Meta,
f'make_{cls.__name__.lower()}': make_instance,
'dumps': dumps,
'dump': dump,
**schema_})
return DataClassSchema
| true | 2 |
|
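build_schema is what backs the public .schema() classmethod; a minimal end-to-end sketch (assumes the dataclass_json decorator attaches that classmethod, which is outside this record):

from dataclasses import dataclass
from dataclasses_json import dataclass_json

@dataclass_json
@dataclass
class Point:
    x: int
    y: int

schema = Point.schema()                # generated type is named 'PointSchema'
print(schema.load({'x': 1, 'y': 2}))   # Point(x=1, y=2), via make_instance
print(schema.dumps(Point(1, 2)))       # '{"x": 1, "y": 2}', via _ExtendedEncoder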
37 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_from_dict
|
@staticmethod
@abc.abstractmethod
def handle_from_dict(cls, kvs: Dict[Any, Any]) -> Dict[str, Any]:
"""
Return the parameters to initialize the class with.
"""
pass
|
[
19,
23
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
@abc.abstractmethod
def handle_from_dict(cls, kvs: Dict[Any, Any]) -> Dict[str, Any]:
"""
Return the parameters to initialize the class with.
"""
pass
| false | 0 |
38 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_to_dict
|
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Return the parameters that will be written to the output dict
"""
return kvs
|
[
26,
30
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Return the parameters that will be written to the output dict
"""
return kvs
| false | 0 |
39 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_dump
|
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
"""
Return the parameters that will be added to the schema dump.
"""
return {}
|
[
33,
37
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
"""
Return the parameters that will be added to the schema dump.
"""
return {}
| false | 0 |
40 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
return obj.__init__
|
[
40,
41
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def create_init(obj) -> Callable:
return obj.__init__
| false | 0 |
41 |
dataclasses_json
|
dataclasses_json.undefined
|
_RaiseUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
if len(unknown) > 0:
raise UndefinedParameterError(
f"Received undefined initialization arguments {unknown}")
return known
|
[
65,
72
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _RaiseUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
if len(unknown) > 0:
raise UndefinedParameterError(
f"Received undefined initialization arguments {unknown}")
return known
| true | 2 |
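From the caller's side, the RAISE action looks like this (a sketch; the assumption is that dataclass_json(undefined=Undefined.RAISE) selects _RaiseUndefinedParameters):

from dataclasses import dataclass
from dataclasses_json import dataclass_json, Undefined
from dataclasses_json.undefined import UndefinedParameterError

@dataclass_json(undefined=Undefined.RAISE)
@dataclass
class Strict:
    a: int

try:
    Strict.from_dict({'a': 1, 'b': 2})  # from_dict is the mixin API (assumed)
except UndefinedParameterError as err:
    print(err)  # Received undefined initialization arguments {'b': 2}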
42 |
dataclasses_json
|
dataclasses_json.undefined
|
_IgnoreUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known_given_parameters, _ = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
return known_given_parameters
|
[
86,
90
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _IgnoreUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known_given_parameters, _ = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
return known_given_parameters
| false | 0 |
43 |
dataclasses_json
|
dataclasses_json.undefined
|
_IgnoreUndefinedParameters
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _ignore_init(self, *args, **kwargs):
known_kwargs, _ = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
num_args_takeable = num_params_takeable - len(known_kwargs)
args = args[:num_args_takeable]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
bound_parameters.apply_defaults()
arguments = bound_parameters.arguments
arguments.pop("self", None)
final_parameters = \
_IgnoreUndefinedParameters.handle_from_dict(obj, arguments)
original_init(self, **final_parameters)
return _ignore_init
|
[
93,
117
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _IgnoreUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _ignore_init(self, *args, **kwargs):
known_kwargs, _ = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
num_args_takeable = num_params_takeable - len(known_kwargs)
args = args[:num_args_takeable]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
bound_parameters.apply_defaults()
arguments = bound_parameters.arguments
arguments.pop("self", None)
final_parameters = \
_IgnoreUndefinedParameters.handle_from_dict(obj, arguments)
original_init(self, **final_parameters)
return _ignore_init
| false | 0 |
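And the ignore action from the caller's side (a sketch; the assumption is that Undefined.EXCLUDE selects _IgnoreUndefinedParameters and installs the patched __init__ above):

from dataclasses import dataclass
from dataclasses_json import dataclass_json, Undefined

@dataclass_json(undefined=Undefined.EXCLUDE)
@dataclass
class Tolerant:
    a: int

print(Tolerant.from_dict({'a': 1, 'b': 2}))  # Tolerant(a=1), 'b' dropped
# The patched __init__ also tolerates unknown keyword arguments:
print(Tolerant(a=1, b=2))                    # Tolerant(a=1)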
44 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = _UndefinedParameterAction \
._separate_defined_undefined_kvs(cls=cls, kvs=kvs)
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=cls)
if catch_all_field.name in known:
already_parsed = isinstance(known[catch_all_field.name], dict)
default_value = _CatchAllUndefinedParameters._get_default(
catch_all_field=catch_all_field)
received_default = default_value == known[catch_all_field.name]
value_to_write: Any
if received_default and len(unknown) == 0:
value_to_write = default_value
elif received_default and len(unknown) > 0:
value_to_write = unknown
elif already_parsed:
# Did not receive default
value_to_write = known[catch_all_field.name]
if len(unknown) > 0:
value_to_write.update(unknown)
else:
error_message = f"Received input field with " \
f"same name as catch-all field: " \
f"'{catch_all_field.name}': " \
f"'{known[catch_all_field.name]}'"
raise UndefinedParameterError(error_message)
else:
value_to_write = unknown
known[catch_all_field.name] = value_to_write
return known
|
[
133,
166
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = _UndefinedParameterAction \
._separate_defined_undefined_kvs(cls=cls, kvs=kvs)
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=cls)
if catch_all_field.name in known:
already_parsed = isinstance(known[catch_all_field.name], dict)
default_value = _CatchAllUndefinedParameters._get_default(
catch_all_field=catch_all_field)
received_default = default_value == known[catch_all_field.name]
value_to_write: Any
if received_default and len(unknown) == 0:
value_to_write = default_value
elif received_default and len(unknown) > 0:
value_to_write = unknown
elif already_parsed:
# Did not receive default
value_to_write = known[catch_all_field.name]
if len(unknown) > 0:
value_to_write.update(unknown)
else:
error_message = f"Received input field with " \
f"same name as catch-all field: " \
f"'{catch_all_field.name}': " \
f"'{known[catch_all_field.name]}'"
raise UndefinedParameterError(error_message)
else:
value_to_write = unknown
known[catch_all_field.name] = value_to_write
return known
| true | 2 |
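The catch-all flow above, seen from the public API (a sketch using the exported CatchAll marker; dataclass_json and from_dict are assumed from the package's mixin API):

from dataclasses import dataclass, field
from dataclasses_json import CatchAll, Undefined, dataclass_json

@dataclass_json(undefined=Undefined.INCLUDE)
@dataclass
class Flexible:
    a: int
    extras: CatchAll = field(default_factory=dict)

obj = Flexible.from_dict({'a': 1, 'b': 2, 'c': 3})
print(obj.extras)  # {'b': 2, 'c': 3}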
45 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_to_dict
|
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
catch_all_field = \
_CatchAllUndefinedParameters._get_catch_all_field(obj)
undefined_parameters = kvs.pop(catch_all_field.name)
if isinstance(undefined_parameters, dict):
kvs.update(
undefined_parameters) # If desired handle letter case here
return kvs
|
[
193,
200
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
catch_all_field = \
_CatchAllUndefinedParameters._get_catch_all_field(obj)
undefined_parameters = kvs.pop(catch_all_field.name)
if isinstance(undefined_parameters, dict):
kvs.update(
undefined_parameters) # If desired handle letter case here
return kvs
| true | 2 |
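On the way back out, handle_to_dict pops the catch-all field and splats its contents into the top level (continuing the Flexible sketch above; to_dict is the mixin API, assumed):

obj = Flexible.from_dict({'a': 1, 'b': 2})
print(obj.to_dict())  # {'a': 1, 'b': 2}; 'extras' itself is flattened away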
46 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_dump
|
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=obj)
return getattr(obj, catch_all_field.name)
|
[
203,
206
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=obj)
return getattr(obj, catch_all_field.name)
| false | 0 |
47 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _catch_all_init(self, *args, **kwargs):
known_kwargs, unknown_kwargs = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
if _CatchAllUndefinedParameters._get_catch_all_field(
obj).name not in known_kwargs:
num_params_takeable -= 1
num_args_takeable = num_params_takeable - len(known_kwargs)
args, unknown_args = args[:num_args_takeable], args[
num_args_takeable:]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
unknown_args = {f"_UNKNOWN{i}": v for i, v in
enumerate(unknown_args)}
arguments = bound_parameters.arguments
arguments.update(unknown_args)
arguments.update(unknown_kwargs)
arguments.pop("self", None)
final_parameters = _CatchAllUndefinedParameters.handle_from_dict(
obj, arguments)
original_init(self, **final_parameters)
return _catch_all_init
|
[
209,
240
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _catch_all_init(self, *args, **kwargs):
known_kwargs, unknown_kwargs = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
if _CatchAllUndefinedParameters._get_catch_all_field(
obj).name not in known_kwargs:
num_params_takeable -= 1
num_args_takeable = num_params_takeable - len(known_kwargs)
args, unknown_args = args[:num_args_takeable], args[
num_args_takeable:]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
unknown_args = {f"_UNKNOWN{i}": v for i, v in
enumerate(unknown_args)}
arguments = bound_parameters.arguments
arguments.update(unknown_args)
arguments.update(unknown_kwargs)
arguments.pop("self", None)
final_parameters = _CatchAllUndefinedParameters.handle_from_dict(
obj, arguments)
original_init(self, **final_parameters)
return _catch_all_init
| true | 2 |
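The patched constructor above also tolerates stray positional arguments, stashing them under synthetic _UNKNOWN<i> keys (a sketch continuing Flexible; the behavior is inferred from the code above, not from package documentation):

obj = Flexible(1, 'stray', note='hi')
print(obj.extras)  # {'_UNKNOWN0': 'stray', 'note': 'hi'}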
48 |
docstring_parser
|
docstring_parser.common
|
DocstringMeta
|
__init__
|
def __init__(self, args: T.List[str], description: str) -> None:
"""Initialize self.
:param args: list of arguments. The exact content of this variable is
dependent on the kind of docstring; it's used to distinguish between
custom docstring meta information items.
:param description: associated docstring description.
"""
self.args = args
self.description = description
|
[
32,
41
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringMeta:
def __init__(self, args: T.List[str], description: str) -> None:
"""Initialize self.
:param args: list of arguments. The exact content of this variable is
dependent on the kind of docstring; it's used to distinguish between
custom docstring meta information items.
:param description: associated docstring description.
"""
self.args = args
self.description = description
| false | 0 |
49 |
docstring_parser
|
docstring_parser.common
|
DocstringParam
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
arg_name: str,
type_name: T.Optional[str],
is_optional: T.Optional[bool],
default: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.arg_name = arg_name
self.type_name = type_name
self.is_optional = is_optional
self.default = default
|
[
47,
61
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringParam(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
arg_name: str,
type_name: T.Optional[str],
is_optional: T.Optional[bool],
default: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.arg_name = arg_name
self.type_name = type_name
self.is_optional = is_optional
self.default = default
| false | 0 |
50 |
docstring_parser
|
docstring_parser.common
|
DocstringReturns
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
is_generator: bool,
return_name: T.Optional[str] = None,
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.is_generator = is_generator
self.return_name = return_name
|
[
67,
79
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringReturns(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
is_generator: bool,
return_name: T.Optional[str] = None,
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.is_generator = is_generator
self.return_name = return_name
| false | 0 |
51 |
docstring_parser
|
docstring_parser.common
|
DocstringRaises
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.description = description
|
[
85,
94
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringRaises(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.description = description
| false | 0 |
52 |
docstring_parser
|
docstring_parser.common
|
DocstringDeprecated
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
version: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.version = version
self.description = description
|
[
100,
109
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringDeprecated(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
version: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.version = version
self.description = description
| false | 0 |
53 |
docstring_parser
|
docstring_parser.common
|
Docstring
|
__init__
|
def __init__(self) -> None:
"""Initialize self."""
self.short_description = None # type: T.Optional[str]
self.long_description = None # type: T.Optional[str]
self.blank_after_short_description = False
self.blank_after_long_description = False
self.meta = []
|
[
115,
121
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class Docstring:
def __init__(self) -> None:
"""Initialize self."""
self.short_description = None # type: T.Optional[str]
self.long_description = None # type: T.Optional[str]
self.blank_after_short_description = False
self.blank_after_long_description = False
self.meta = []
| false | 0 |
54 |
docstring_parser
|
docstring_parser.google
|
GoogleParser
|
add_section
|
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
|
[
174,
181
] | false |
[
"GOOGLE_TYPED_ARG_REGEX",
"GOOGLE_ARG_DESC_REGEX",
"MULTIPLE_PATTERN",
"DEFAULT_SECTIONS"
] |
import inspect
import re
import typing as T
from collections import namedtuple, OrderedDict
from enum import IntEnum
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)")
GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. Defaults to (.+)\.")
MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)")
DEFAULT_SECTIONS = [
Section("Arguments", "param", SectionType.MULTIPLE),
Section("Args", "param", SectionType.MULTIPLE),
Section("Parameters", "param", SectionType.MULTIPLE),
Section("Params", "param", SectionType.MULTIPLE),
Section("Raises", "raises", SectionType.MULTIPLE),
Section("Exceptions", "raises", SectionType.MULTIPLE),
Section("Except", "raises", SectionType.MULTIPLE),
Section("Attributes", "attribute", SectionType.MULTIPLE),
Section("Example", "examples", SectionType.SINGULAR),
Section("Examples", "examples", SectionType.SINGULAR),
Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE),
Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE),
]
class GoogleParser:
def __init__(
self, sections: T.Optional[T.List[Section]] = None, title_colon=True
):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
:param title_colon: require colon after section title.
"""
if not sections:
sections = DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self.title_colon = title_colon
self._setup()
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
| false | 0 |
55 |
docstring_parser
|
docstring_parser.google
|
GoogleParser
|
parse
|
def parse(self, text: str) -> Docstring:
"""Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(self.titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = OrderedDict()
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in self.sections:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError('Can\'t infer indent from "{}"'.format(chunk))
indent = indent_match.group()
# Check for singular elements
if self.sections[title].type in [
SectionType.SINGULAR,
SectionType.SINGULAR_OR_MULTIPLE,
]:
part = inspect.cleandoc(chunk)
ret.meta.append(self._build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(
'No specification for "{}": "{}"'.format(title, chunk)
)
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(self._build_meta(part, title))
return ret
|
[
183,
265
] | false |
[
"GOOGLE_TYPED_ARG_REGEX",
"GOOGLE_ARG_DESC_REGEX",
"MULTIPLE_PATTERN",
"DEFAULT_SECTIONS"
] |
import inspect
import re
import typing as T
from collections import namedtuple, OrderedDict
from enum import IntEnum
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)")
GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. Defaults to (.+)\.")
MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)")
DEFAULT_SECTIONS = [
Section("Arguments", "param", SectionType.MULTIPLE),
Section("Args", "param", SectionType.MULTIPLE),
Section("Parameters", "param", SectionType.MULTIPLE),
Section("Params", "param", SectionType.MULTIPLE),
Section("Raises", "raises", SectionType.MULTIPLE),
Section("Exceptions", "raises", SectionType.MULTIPLE),
Section("Except", "raises", SectionType.MULTIPLE),
Section("Attributes", "attribute", SectionType.MULTIPLE),
Section("Example", "examples", SectionType.SINGULAR),
Section("Examples", "examples", SectionType.SINGULAR),
Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE),
Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE),
]
class GoogleParser:
def __init__(
self, sections: T.Optional[T.List[Section]] = None, title_colon=True
):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
:param title_colon: require colon after section title.
"""
if not sections:
sections = DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self.title_colon = title_colon
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(self.titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = OrderedDict()
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in self.sections:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError('Can\'t infer indent from "{}"'.format(chunk))
indent = indent_match.group()
# Check for singular elements
if self.sections[title].type in [
SectionType.SINGULAR,
SectionType.SINGULAR_OR_MULTIPLE,
]:
part = inspect.cleandoc(chunk)
ret.meta.append(self._build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(
'No specification for "{}": "{}"'.format(title, chunk)
)
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(self._build_meta(part, title))
return ret
| true | 2 |
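A short sketch driving the parser above; the attribute names follow the DocstringMeta/DocstringParam classes defined earlier in this dataset:

from docstring_parser.google import GoogleParser

doc = GoogleParser().parse(
    '''Fetch rows.

    Args:
        limit (int): Maximum rows. Defaults to 10.

    Returns:
        list: The rows.
    '''
)
print(doc.short_description)  # 'Fetch rows.'
for meta in doc.meta:
    print(meta.args, meta.description)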
56 |
docstring_parser
|
docstring_parser.numpydoc
|
parse
|
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
|
[
325,
330
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
| false | 0 |
|
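The numpy-style counterpart, via the module-level parse above (a sketch):

from docstring_parser.numpydoc import parse

doc = parse(
    '''Summarize.

    Parameters
    ----------
    limit : int, optional
        Maximum rows. Default is 10.
    '''
)
for meta in doc.meta:
    print(meta.args, meta.description)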
57 |
docstring_parser
|
docstring_parser.numpydoc
|
Section
|
__init__
|
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
|
[
57,
59
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
| false | 0 |
58 |
docstring_parser
|
docstring_parser.numpydoc
|
Section
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
|
[
70,
76
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
| false | 0 |
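A plain ``Section`` simply wraps the whole cleaned body in one ``DocstringMeta`` keyed by the section's ``key``. A minimal sketch of that behavior through the package's top-level API (assuming ``docstring_parser`` is installed; the sample docstring is made up):

```python
from docstring_parser import parse

doc = parse(
    """Do a thing.

    Notes
    -----
    Everything under a plain section becomes one meta entry.
    """
)
for meta in doc.meta:
    # Expected: args == ['notes'] and description == the cleaned body.
    print(meta.args, "->", meta.description)
```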
59 |
docstring_parser
|
docstring_parser.numpydoc
|
_KVSection
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
|
[
93,
98
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class _KVSection(Section):
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
| true | 2 |
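The ``_KVSection`` technique: ``KV_REGEX`` matches every non-indented line (a key), and the text between two consecutive key matches is that key's indented value block. A self-contained sketch of the same idea; ``pairwise_padded`` stands in for the module's private ``_pairwise`` helper, which is not shown in this record:

```python
import inspect
import re
from itertools import tee, zip_longest

KV = re.compile(r"^[^\s].*$", flags=re.M)

def pairwise_padded(iterable):
    # Yield (item, next_item), padding the final pair with None,
    # mirroring how the parser detects the last key block.
    a, b = tee(iterable)
    next(b, None)
    return zip_longest(a, b)

text = "x : int\n    first operand\ny : int\n    second operand"
for match, nxt in pairwise_padded(KV.finditer(text)):
    value = text[match.end():nxt.start() if nxt else None]
    print(match.group(), "=>", inspect.cleandoc(value))
```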
60 |
docstring_parser
|
docstring_parser.numpydoc
|
DeprecationSection
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
|
[
209,
215
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class DeprecationSection(_SphinxSection):
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
| true | 2 |
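The ``version, desc, *_ = ... + [None, None]`` line is a padded-unpack idiom: it tolerates a deprecation body with no description line. In isolation:

```python
# Splitting "version\ndescription" while tolerating a missing description.
version, desc, *_ = "1.6.0\nUse the new helper.".split(sep="\n", maxsplit=1) + [None, None]
print(version, desc)  # 1.6.0 Use the new helper.

version, desc, *_ = "1.6.0".split(sep="\n", maxsplit=1) + [None, None]
print(desc)  # None -- no description line was present
```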
61 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
__init__
|
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
|
[
256,
263
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
| false | 0 |
62 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
add_section
|
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
|
[
271,
278
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
| false | 0 |
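``add_section`` lets callers extend the recognized section titles at runtime. A minimal sketch (assuming ``docstring_parser`` is installed; the ``Todo`` title and ``todo`` key are invented for this example):

```python
from docstring_parser.numpydoc import NumpydocParser, Section

parser = NumpydocParser()
parser.add_section(Section("Todo", "todo"))  # register a custom section
doc = parser.parse(
    "Frobnicate.\n"
    "\n"
    "Todo\n"
    "----\n"
    "Handle negative input.\n"
)
# The custom section body should now surface as a meta entry keyed 'todo'.
```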
63 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
parse
|
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
|
[
280,
322
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
| true | 2 |
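Before any sections are handled, ``parse`` splits the description into a short part (the first line) and a long part, recording whether a blank line separated them:

```python
from docstring_parser.numpydoc import NumpydocParser

doc = NumpydocParser().parse(
    "Short summary line.\n"
    "\n"
    "A longer description after the blank line.\n"
)
print(doc.short_description)              # 'Short summary line.'
print(doc.blank_after_short_description)  # True
print(doc.long_description)               # 'A longer description after the blank line.'
```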
64 |
docstring_parser
|
docstring_parser.parser
|
parse
|
def parse(text: str, style: Style = Style.auto) -> Docstring:
"""Parse the docstring into its components.
:param text: docstring text to parse
:param style: docstring style
:returns: parsed docstring representation
"""
if style != Style.auto:
return STYLES[style](text)
rets = []
for parse_ in STYLES.values():
try:
rets.append(parse_(text))
except ParseError as e:
exc = e
if not rets:
raise exc
return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0]
|
[
6,
24
] | false |
[] |
from docstring_parser.common import Docstring, ParseError
from docstring_parser.styles import STYLES, Style
def parse(text: str, style: Style = Style.auto) -> Docstring:
"""Parse the docstring into its components.
:param text: docstring text to parse
:param style: docstring style
:returns: parsed docstring representation
"""
if style != Style.auto:
return STYLES[style](text)
rets = []
for parse_ in STYLES.values():
try:
rets.append(parse_(text))
except ParseError as e:
exc = e
if not rets:
raise exc
return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0]
| true | 2 |
|
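With the default ``Style.auto``, every registered style parser is attempted and the result carrying the most meta entries wins; passing an explicit style skips the contest. For example:

```python
from docstring_parser import parse

doc = parse(":param x: the operand\n:returns: x squared")
for meta in doc.meta:
    print(meta.args)  # ['param', 'x'] then ['returns']
```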
65 |
docstring_parser
|
docstring_parser.rest
|
parse
|
def parse(text: str) -> Docstring:
"""Parse the ReST-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
text = inspect.cleandoc(text)
match = re.search("^:", text, flags=re.M)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match in re.finditer(
r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M
):
chunk = match.group(0)
if not chunk:
continue
try:
args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1)
except ValueError:
raise ParseError(
'Error parsing meta information near "{}".'.format(chunk)
)
args = args_chunk.split()
desc = desc_chunk.strip()
if "\n" in desc:
first_line, rest = desc.split("\n", 1)
desc = first_line + "\n" + inspect.cleandoc(rest)
ret.meta.append(_build_meta(args, desc))
return ret
|
[
85,
131
] | false |
[] |
import inspect
import re
import typing as T
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
def parse(text: str) -> Docstring:
"""Parse the ReST-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
text = inspect.cleandoc(text)
match = re.search("^:", text, flags=re.M)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match in re.finditer(
r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M
):
chunk = match.group(0)
if not chunk:
continue
try:
args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1)
except ValueError:
raise ParseError(
'Error parsing meta information near "{}".'.format(chunk)
)
args = args_chunk.split()
desc = desc_chunk.strip()
if "\n" in desc:
first_line, rest = desc.split("\n", 1)
desc = first_line + "\n" + inspect.cleandoc(rest)
ret.meta.append(_build_meta(args, desc))
return ret
| true | 2 |
|
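The field-splitting core of the ReST parser in isolation: the regex grabs each ``:...:`` block, and the first colon after the stripped prefix separates the argument words from the description:

```python
import re

meta_chunk = ":param x: the operand\n:raises ValueError: if x is bad\n"
for match in re.finditer(r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M):
    args_chunk, desc_chunk = match.group(0).lstrip(":").split(":", 1)
    print(args_chunk.split(), "->", desc_chunk.strip())
# ['param', 'x'] -> the operand
# ['raises', 'ValueError'] -> if x is bad
```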
66 |
flutes
|
flutes.iterator
|
chunk
|
def chunk(n: int, iterable: Iterable[T]) -> Iterator[List[T]]:
r"""Split the iterable into chunks, with each chunk containing no more than ``n`` elements.
.. code:: python
>>> list(chunk(3, range(10)))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
:param n: The maximum number of elements in one chunk.
:param iterable: The iterable.
:return: An iterator over chunks.
"""
if n <= 0:
raise ValueError("`n` should be positive")
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if len(group) > 0:
yield group
|
[
22,
43
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def chunk(n: int, iterable: Iterable[T]) -> Iterator[List[T]]:
r"""Split the iterable into chunks, with each chunk containing no more than ``n`` elements.
.. code:: python
>>> list(chunk(3, range(10)))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
:param n: The maximum number of elements in one chunk.
:param iterable: The iterable.
:return: An iterator over chunks.
"""
if n <= 0:
raise ValueError("`n` should be positive")
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if len(group) > 0:
yield group
| true | 2 |
|
67 |
flutes
|
flutes.iterator
|
take
|
def take(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Take the first :attr:`n` elements from an iterable.
.. code:: python
>>> list(take(5, range(1000000)))
[0, 1, 2, 3, 4]
:param n: The number of elements to take.
:param iterable: The iterable.
:return: An iterator returning the first :attr:`n` elements from the iterable.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
yield next(it)
except StopIteration:
pass
|
[
46,
65
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def take(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Take the first :attr:`n` elements from an iterable.
.. code:: python
>>> list(take(5, range(1000000)))
[0, 1, 2, 3, 4]
:param n: The number of elements to take.
:param iterable: The iterable.
:return: An iterator returning the first :attr:`n` elements from the iterable.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
yield next(it)
except StopIteration:
pass
| true | 2 |
|
68 |
flutes
|
flutes.iterator
|
drop
|
def drop(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Drop the first :attr:`n` elements from an iterable, and return the rest as an iterator.
.. code:: python
>>> next(drop(5, range(1000000)))
5
:param n: The number of elements to drop.
:param iterable: The iterable.
:return: An iterator returning the remaining part of the iterable after the first :attr:`n` elements.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
next(it)
yield from it
except StopIteration:
pass
|
[
68,
88
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def drop(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Drop the first :attr:`n` elements from an iterable, and return the rest as an iterator.
.. code:: python
>>> next(drop(5, range(1000000)))
5
:param n: The number of elements to drop.
:param iterable: The iterable.
:return: An iterator returning the remaining part of the iterable after the first :attr:`n` elements.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
next(it)
yield from it
except StopIteration:
pass
| true | 2 |
|
69 |
flutes
|
flutes.iterator
|
drop_until
|
def drop_until(pred_fn: Callable[[T], bool], iterable: Iterable[T]) -> Iterator[T]:
r"""Drop elements from the iterable until an element that satisfies the predicate is encountered. Similar to the
built-in :py:func:`filter` function, but only applied to a prefix of the iterable.
.. code:: python
>>> list(drop_until(lambda x: x > 5, range(10)))
[6, 7, 8, 9]
:param pred_fn: The predicate that returned elements should satisfy.
:param iterable: The iterable.
:return: The iterator after dropping elements.
"""
iterator = iter(iterable)
for item in iterator:
if not pred_fn(item):
continue
yield item
break
yield from iterator
|
[
91,
110
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def drop_until(pred_fn: Callable[[T], bool], iterable: Iterable[T]) -> Iterator[T]:
r"""Drop elements from the iterable until an element that satisfies the predicate is encountered. Similar to the
built-in :py:func:`filter` function, but only applied to a prefix of the iterable.
.. code:: python
>>> list(drop_until(lambda x: x > 5, range(10)))
[6, 7, 8, 9]
:param pred_fn: The predicate that returned elements should satisfy.
:param iterable: The iterable.
:return: The iterator after dropping elements.
"""
iterator = iter(iterable)
for item in iterator:
if not pred_fn(item):
continue
yield item
break
yield from iterator
| true | 2 |
|
70 |
flutes
|
flutes.iterator
|
split_by
|
def split_by(iterable: Iterable[A], empty_segments: bool = False, *, criterion=None, separator=None) \
-> Iterator[List[A]]:
r"""Split a list into sub-lists by dropping certain elements. Exactly one of ``criterion`` and ``separator`` must be
specified. For example:
.. code:: python
>>> list(split_by(range(10), criterion=lambda x: x % 3 == 0))
[[1, 2], [4, 5], [7, 8]]
>>> list(split_by(" Split by: ", empty_segments=True, separator='.'))
[[], ['S', 'p', 'l', 'i', 't'], ['b', 'y', ':'], []]
:param iterable: The list to split.
:param empty_segments: If ``True``, include an empty list in cases where two adjacent elements satisfy
the criterion.
:param criterion: The criterion to decide whether to drop an element.
:param separator: The separator for sub-lists. An element is dropped if it is equal to ``separator``.
:return: List of sub-lists.
"""
if not ((criterion is None) ^ (separator is None)):
raise ValueError("Exactly one of `criterion` and `separator` should be specified")
if criterion is None:
criterion = lambda x: x == separator
group = []
for x in iterable:
if not criterion(x):
group.append(x)
else:
if len(group) > 0 or empty_segments:
yield group
group = []
if len(group) > 0 or empty_segments:
yield group
|
[
123,
156
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def split_by(iterable: Iterable[A], empty_segments: bool = False, *, criterion=None, separator=None) \
-> Iterator[List[A]]:
r"""Split a list into sub-lists by dropping certain elements. Exactly one of ``criterion`` and ``separator`` must be
specified. For example:
.. code:: python
>>> list(split_by(range(10), criterion=lambda x: x % 3 == 0))
[[1, 2], [4, 5], [7, 8]]
>>> list(split_by(" Split by: ", empty_segments=True, separator='.'))
[[], ['S', 'p', 'l', 'i', 't'], ['b', 'y', ':'], []]
:param iterable: The list to split.
:param empty_segments: If ``True``, include an empty list in cases where two adjacent elements satisfy
the criterion.
:param criterion: The criterion to decide whether to drop an element.
:param separator: The separator for sub-lists. An element is dropped if it is equal to ``separator``.
:return: List of sub-lists.
"""
if not ((criterion is None) ^ (separator is None)):
raise ValueError("Exactly one of `criterion` and `separator` should be specified")
if criterion is None:
criterion = lambda x: x == separator
group = []
for x in iterable:
if not criterion(x):
group.append(x)
else:
if len(group) > 0 or empty_segments:
yield group
group = []
if len(group) > 0 or empty_segments:
yield group
| true | 2 |
|
71 |
flutes
|
flutes.iterator
|
scanl
|
def scanl(func, iterable, *args):
r"""Computes the intermediate results of :py:func:`~functools.reduce`. Equivalent to Haskell's ``scanl``. For
example:
.. code:: python
>>> list(scanl(operator.add, [1, 2, 3, 4], 0))
[0, 1, 3, 6, 10]
>>> list(scanl(lambda s, x: x + s, ['a', 'b', 'c', 'd']))
['a', 'ba', 'cba', 'dcba']
Learn more at `Learn You a Haskell: Higher Order Functions <http://learnyouahaskell.com/higher-order-functions>`_.
:param func: The function to apply. This should be a binary function where the arguments are: the accumulator,
and the current element.
:param iterable: The list of elements to iteratively apply the function to.
:param initial: The initial value for the accumulator. If not supplied, the first element in the list is used.
:return: The intermediate results at each step.
"""
iterable = iter(iterable)
if len(args) == 1:
acc = args[0]
elif len(args) == 0:
acc = next(iterable)
else:
raise ValueError("Too many arguments")
yield acc
for x in iterable:
acc = func(acc, x)
yield acc
|
[
167,
196
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def scanl(func, iterable, *args):
r"""Computes the intermediate results of :py:func:`~functools.reduce`. Equivalent to Haskell's ``scanl``. For
example:
.. code:: python
>>> list(scanl(operator.add, [1, 2, 3, 4], 0))
[0, 1, 3, 6, 10]
>>> list(scanl(lambda s, x: x + s, ['a', 'b', 'c', 'd']))
['a', 'ba', 'cba', 'dcba']
Learn more at `Learn You a Haskell: Higher Order Functions <http://learnyouahaskell.com/higher-order-functions>`_.
:param func: The function to apply. This should be a binary function where the arguments are: the accumulator,
and the current element.
:param iterable: The list of elements to iteratively apply the function to.
:param initial: The initial value for the accumulator. If not supplied, the first element in the list is used.
:return: The intermediate results at each step.
"""
iterable = iter(iterable)
if len(args) == 1:
acc = args[0]
elif len(args) == 0:
acc = next(iterable)
else:
raise ValueError("Too many arguments")
yield acc
for x in iterable:
acc = func(acc, x)
yield acc
| true | 2 |
|
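For reference, ``scanl`` matches the standard library's ``itertools.accumulate`` (Python 3.8+ for the ``initial`` keyword):

```python
import operator
from itertools import accumulate

print(list(accumulate([1, 2, 3, 4], operator.add, initial=0)))
# [0, 1, 3, 6, 10] -- same as scanl(operator.add, [1, 2, 3, 4], 0)
print(list(accumulate(['a', 'b', 'c', 'd'], lambda s, x: x + s)))
# ['a', 'ba', 'cba', 'dcba']
```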
72 |
flutes
|
flutes.iterator
|
LazyList
|
__iter__
|
def __iter__(self):
if self.exhausted:
return iter(self.list)
return self.LazyListIterator(self)
|
[
257,
260
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __iter__(self):
if self.exhausted:
return iter(self.list)
return self.LazyListIterator(self)
| true | 2 |
73 |
flutes
|
flutes.iterator
|
LazyList
|
__getitem__
|
def __getitem__(self, idx):
if isinstance(idx, slice):
self._fetch_until(idx.stop)
else:
self._fetch_until(idx)
return self.list[idx]
|
[
280,
285
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __getitem__(self, idx):
if isinstance(idx, slice):
self._fetch_until(idx.stop)
else:
self._fetch_until(idx)
return self.list[idx]
| true | 2 |
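``_fetch_until`` is not included in this record; the following is only a plausible sketch consistent with how ``__getitem__`` and ``__len__`` use it, not the library's actual code:

```python
def _fetch_until(self, idx):
    # Assumed behavior: materialize elements up to index ``idx``;
    # ``None`` (an open-ended slice) or a negative index forces full
    # depletion of the underlying iterator.
    if self.exhausted:
        return
    try:
        while idx is None or idx < 0 or len(self.list) <= idx:
            self.list.append(next(self.iter))
    except StopIteration:
        self.exhausted = True
```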
74 |
flutes
|
flutes.iterator
|
LazyList
|
__len__
|
def __len__(self):
if self.exhausted:
return len(self.list)
else:
raise TypeError("__len__ is not available before the iterable is depleted")
|
[
287,
291
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __len__(self):
if self.exhausted:
return len(self.list)
else:
raise TypeError("__len__ is not available before the iterable is depleted")
| true | 2 |
75 |
flutes
|
flutes.iterator
|
Range
|
__next__
|
def __next__(self) -> int:
if self.val >= self.r:
raise StopIteration
result = self.val
self.val += self.step
return result
|
[
332,
337
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class Range(Sequence[int]):
def __init__(self, *args):
if len(args) == 0 or len(args) > 3:
raise ValueError("Range should be called the same way as the builtin `range`")
if len(args) == 1:
self.l = 0
self.r = args[0]
self.step = 1
else:
self.l = args[0]
self.r = args[1]
self.step = 1 if len(args) == 2 else args[2]
self.val = self.l
self.length = (self.r - self.l) // self.step
def __next__(self) -> int:
if self.val >= self.r:
raise StopIteration
result = self.val
self.val += self.step
return result
| true | 2 |
76 |
flutes
|
flutes.iterator
|
Range
|
__getitem__
|
def __getitem__(self, item):
if isinstance(item, slice):
return [self._get_idx(idx) for idx in range(*item.indices(self.length))]
if item < 0:
item = self.length + item
return self._get_idx(item)
|
[
351,
356
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class Range(Sequence[int]):
def __init__(self, *args):
if len(args) == 0 or len(args) > 3:
raise ValueError("Range should be called the same way as the builtin `range`")
if len(args) == 1:
self.l = 0
self.r = args[0]
self.step = 1
else:
self.l = args[0]
self.r = args[1]
self.step = 1 if len(args) == 2 else args[2]
self.val = self.l
self.length = (self.r - self.l) // self.step
def __getitem__(self, item):
if isinstance(item, slice):
return [self._get_idx(idx) for idx in range(*item.indices(self.length))]
if item < 0:
item = self.length + item
return self._get_idx(item)
| true | 2 |
77 |
flutes
|
flutes.iterator
|
MapList
|
__getitem__
|
def __getitem__(self, item):
if isinstance(item, int):
return self.func(self.list[item])
return [self.func(x) for x in self.list[item]]
|
[
391,
394
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class MapList(Generic[R], Sequence[R]):
def __init__(self, func: Callable[[T], R], lst: Sequence[T]):
self.func = func
self.list = lst
def __getitem__(self, item):
if isinstance(item, int):
return self.func(self.list[item])
return [self.func(x) for x in self.list[item]]
| true | 2 |
78 |
flutils
|
flutils.codecs.b64
|
encode
|
def encode(
text: _STR,
errors: _STR = 'strict'
) -> Tuple[bytes, int]:
"""Convert the given ``text`` of base64 characters into the base64
decoded bytes.
Args:
text (str): The string input. The given string input can span
across many lines and be indented any number of spaces.
errors (str): Not used. This argument exists to meet the
interface requirements. Any value given to this argument
is ignored.
Returns:
bytes: The base64-decoded bytes of the given ``text``.
int: The number of characters of the given ``text`` consumed.
"""
# Convert the given 'text', which may be a UserString, into a str.
text_input = str(text)
# Cleanup whitespace.
text_str = text_input.strip()
text_str = '\n'.join(
filter(
lambda x: len(x) > 0,
map(lambda x: x.strip(), text_str.strip().splitlines())
)
)
# Convert the cleaned text into utf8 bytes
text_bytes = text_str.encode('utf-8')
try:
out = base64.decodebytes(text_bytes)
except Error as e:
raise UnicodeEncodeError(
'b64',
text_input,
0,
len(text),
(
f'{text_str!r} is not a proper base64 character string: '
f'{e}'
)
)
return out, len(text)
|
[
16,
61
] | true |
[
"_STR",
"NAME"
] |
import base64
import codecs
from binascii import Error
from collections import UserString
from typing import ByteString as _ByteString
from typing import (
Optional,
Tuple,
Union,
)
_STR = Union[str, UserString]
NAME = __name__.split('.')[-1]
def encode(
text: _STR,
errors: _STR = 'strict'
) -> Tuple[bytes, int]:
"""Convert the given ``text`` of base64 characters into the base64
decoded bytes.
Args:
text (str): The string input. The given string input can span
across many lines and be indented any number of spaces.
errors (str): Not used. This argument exists to meet the
interface requirements. Any value given to this argument
is ignored.
Returns:
bytes: The base64-decoded bytes of the given ``text``.
int: The number of characters of the given ``text`` consumed.
"""
# Convert the given 'text', which may be a UserString, into a str.
text_input = str(text)
# Cleanup whitespace.
text_str = text_input.strip()
text_str = '\n'.join(
filter(
lambda x: len(x) > 0,
map(lambda x: x.strip(), text_str.strip().splitlines())
)
)
# Convert the cleaned text into utf8 bytes
text_bytes = text_str.encode('utf-8')
try:
out = base64.decodebytes(text_bytes)
except Error as e:
raise UnicodeEncodeError(
'b64',
text_input,
0,
len(text),
(
f'{text_str!r} is not a proper base64 character string: '
f'{e}'
)
)
return out, len(text)
| false | 0 |
|
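The cleanup-then-decode pipeline in isolation, using only the standard library:

```python
import base64

text = """
    TG9uZyBiYXNlNjQg
    Y29udGVudCBjYW4g
    c3BhbiBsaW5lcy4=
"""
# Strip each line, drop blanks, then base64-decode -- the same cleanup
# encode() performs on indented, multi-line input.
cleaned = "\n".join(
    line.strip() for line in text.strip().splitlines() if line.strip()
)
print(base64.decodebytes(cleaned.encode("utf-8")))
# b'Long base64 content can span lines.'
```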
79 |
flutils
|
flutils.codecs.b64
|
register
|
def register() -> None:
"""Register the ``b64`` codec with Python."""
try:
codecs.getdecoder(NAME)
except LookupError:
codecs.register(_get_codec_info)
|
[
109,
114
] | true |
[
"_STR",
"NAME"
] |
import base64
import codecs
from binascii import Error
from collections import UserString
from typing import ByteString as _ByteString
from typing import (
Optional,
Tuple,
Union,
)
_STR = Union[str, UserString]
NAME = __name__.split('.')[-1]
def register() -> None:
"""Register the ``b64`` codec with Python."""
try:
codecs.getdecoder(NAME)
except LookupError:
codecs.register(_get_codec_info)
| false | 0 |
|
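Once registered, the codec is addressable by its ``NAME``. A usage sketch (assuming ``flutils`` is installed):

```python
import codecs
from flutils.codecs.b64 import register

register()  # safe to call repeatedly: it only registers once
print(codecs.encode("aGVsbG8=", "b64"))  # b'hello', per encode() above
```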
80 |
flutils
|
flutils.codecs.raw_utf8_escape
|
decode
|
def decode(
data: _ByteString,
errors: _Str = 'strict'
) -> Tuple[str, int]:
"""Convert a bytes type of escaped utf8 hexadecimal to a string.
Args:
data (bytes or bytearray or memoryview): The escaped utf8
hexadecimal bytes.
errors (str or :obj:`~UserString`): The error checking level.
Returns:
str: The given ``data`` (of escaped utf8 hexadecimal bytes)
converted into a :obj:`str`.
int: The number of bytes of the given ``data`` consumed.
Raises:
UnicodeDecodeError: if the given ``data`` contains escaped
utf8 hexadecimal that references invalid utf8 bytes.
"""
# Convert memoryview and bytearray objects to bytes.
data_bytes = bytes(data)
# Convert the given 'errors', which may be a UserString, into a str.
errors_input = str(errors)
# Convert the utf8 bytes into a string of latin-1 characters.
# This basically maps the exact utf8 bytes to the string. Also,
# this converts any escaped hexadecimal sequences \\xHH into
# \xHH bytes.
text_str_latin1 = data_bytes.decode('unicode_escape')
# Convert the string of latin-1 characters (which are actually
# utf8 characters) into bytes.
text_bytes_utf8 = text_str_latin1.encode('latin1')
# Convert the utf8 bytes into a string.
try:
out = text_bytes_utf8.decode('utf-8', errors=errors_input)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
'eutf8h',
data_bytes,
e.start,
e.end,
e.reason
)
return out, len(data)
|
[
90,
139
] | true |
[
"_Str",
"NAME"
] |
import codecs
from collections import UserString
from functools import reduce
from typing import ByteString as _ByteString
from typing import (
Generator,
Optional,
Tuple,
Union,
cast,
)
_Str = Union[str, UserString]
NAME = __name__.split('.')[-1]
def decode(
data: _ByteString,
errors: _Str = 'strict'
) -> Tuple[str, int]:
"""Convert a bytes type of escaped utf8 hexadecimal to a string.
Args:
data (bytes or bytearray or memoryview): The escaped utf8
hexadecimal bytes.
errors (str or :obj:`~UserString`): The error checking level.
Returns:
str: The given ``data`` (of escaped utf8 hexadecimal bytes)
converted into a :obj:`str`.
int: The number of bytes of the given ``data`` consumed.
Raises:
UnicodeDecodeError: if the given ``data`` contains escaped
utf8 hexadecimal that references invalid utf8 bytes.
"""
# Convert memoryview and bytearray objects to bytes.
data_bytes = bytes(data)
# Convert the given 'errors', which may be a UserString, into a str.
errors_input = str(errors)
# Convert the utf8 bytes into a string of latin-1 characters.
# This basically maps the exact utf8 bytes to the string. Also,
# this converts any escaped hexadecimal sequences \\xHH into
# \xHH bytes.
text_str_latin1 = data_bytes.decode('unicode_escape')
# Convert the string of latin-1 characters (which are actually
# utf8 characters) into bytes.
text_bytes_utf8 = text_str_latin1.encode('latin1')
# Convert the utf8 bytes into a string.
try:
out = text_bytes_utf8.decode('utf-8', errors=errors_input)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
'eutf8h',
data_bytes,
e.start,
e.end,
e.reason
)
return out, len(data)
| false | 0 |
|
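The three-step round trip in isolation: ``unicode_escape`` turns literal ``\xHH`` escape sequences into Latin-1 characters, ``latin1`` maps those characters one-to-one back to bytes, and ``utf-8`` finally interprets them:

```python
data = b"check: \\xe2\\x9c\\x93"        # escaped UTF-8 for the check mark
step1 = data.decode("unicode_escape")   # '\\xe2' escapes become '\xe2' chars
step2 = step1.encode("latin1")          # chars map 1:1 back to raw bytes
print(step2.decode("utf-8"))            # 'check: ✓'
```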
81 |
flutils
|
flutils.codecs.raw_utf8_escape
|
register
|
def register() -> None:
try:
codecs.getdecoder(NAME)
except LookupError:
codecs.register(_get_codec_info)
|
[
157,
161
] | true |
[
"_Str",
"NAME"
] |
import codecs
from collections import UserString
from functools import reduce
from typing import ByteString as _ByteString
from typing import (
Generator,
Optional,
Tuple,
Union,
cast,
)
_Str = Union[str, UserString]
NAME = __name__.split('.')[-1]
def register() -> None:
try:
codecs.getdecoder(NAME)
except LookupError:
codecs.register(_get_codec_info)
| false | 0 |
|
82 |
flutils
|
flutils.decorators
|
cached_property
|
__get__
|
def __get__(self, obj: Any, cls):
if obj is None:
return self
if asyncio.iscoroutinefunction(self.func):
return self._wrap_in_coroutine(obj)
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
|
[
60,
68
] | true |
[
"__all__"
] |
import asyncio
from typing import Any
__all__ = ['cached_property']
class cached_property:
def __init__(self, func):
self.__doc__ = getattr(func, "__doc__")
self.func = func
def __get__(self, obj: Any, cls):
if obj is None:
return self
if asyncio.iscoroutinefunction(self.func):
return self._wrap_in_coroutine(obj)
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
| true | 2 |
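Because the computed value is written into ``obj.__dict__`` under the function's name, the instance attribute shadows this non-data descriptor and later reads never reach ``__get__``:

```python
from flutils.decorators import cached_property

class Report:
    @cached_property
    def data(self):
        print("computing once")
        return [1, 2, 3]

r = Report()
r.data  # prints 'computing once' and stores the result in r.__dict__
r.data  # served straight from the instance dict; no recomputation
```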
83 |
flutils
|
flutils.namedtupleutils
|
to_namedtuple
|
def to_namedtuple(obj: _AllowedTypes) -> Union[NamedTuple, Tuple, List]:
"""Convert particular objects into a namedtuple.
Args:
obj: The object to be converted (or have its contents converted) to
a :obj:`NamedTuple <collections.namedtuple>`.
If the given type is of :obj:`list` or :obj:`tuple`, each item will be
recursively converted to a :obj:`NamedTuple <collections.namedtuple>`
provided the items can be converted. Items that cannot be converted
will still exist in the returned object.
If the given type is of :obj:`list` the return value will be a new
:obj:`list`. This means the items are not changed in the given
``obj``.
If the given type is of :obj:`Mapping <collections.abc.Mapping>`
(:obj:`dict`), keys that are valid Python identifiers will become attributes
on the returned :obj:`NamedTuple <collections.namedtuple>`. Additionally,
the attributes of the returned :obj:`NamedTuple <collections.namedtuple>`
are sorted alphabetically.
If the given type is of :obj:`OrderedDict <collections.OrderedDict>`,
the attributes of the returned :obj:`NamedTuple <collections.namedtuple>`
keep the same order as the given
:obj:`OrderedDict <collections.OrderedDict>` keys.
If the given type is of :obj:`SimpleNamespace <types.SimpleNamespace>`,
the attributes are sorted alphabetically in the returned
:obj:`NamedTuple <collections.namedtuple>`.
Any identifier (key or attribute name) that starts with an underscore
cannot be used as a :obj:`NamedTuple <collections.namedtuple>` attribute.
All values are recursively converted. This means a dictionary that
contains another dictionary, as one of its values, will be converted
to a :obj:`NamedTuple <collections.namedtuple>` with the attribute's
value also converted to a :obj:`NamedTuple <collections.namedtuple>`.
:rtype:
:obj:`list`
A list with any of its values converted to a
:obj:`NamedTuple <collections.namedtuple>`.
:obj:`tuple`
A tuple with any of its values converted to a
:obj:`NamedTuple <collections.namedtuple>`.
:obj:`NamedTuple <collections.namedtuple>`.
Example:
>>> from flutils.namedtupleutils import to_namedtuple
>>> dic = {'a': 1, 'b': 2}
>>> to_namedtuple(dic)
NamedTuple(a=1, b=2)
"""
return _to_namedtuple(obj)
|
[
31,
89
] | true |
[
"__all__",
"_AllowedTypes"
] |
from collections import (
OrderedDict,
namedtuple,
)
from collections.abc import (
Mapping,
Sequence,
)
from functools import singledispatch
from types import SimpleNamespace
from typing import (
Any,
List,
NamedTuple,
Tuple,
Union,
cast,
)
from flutils.validators import validate_identifier
__all__ = ['to_namedtuple']
_AllowedTypes = Union[
List,
Mapping,
NamedTuple,
SimpleNamespace,
Tuple,
]
def to_namedtuple(obj: _AllowedTypes) -> Union[NamedTuple, Tuple, List]:
"""Convert particular objects into a namedtuple.
Args:
obj: The object to be converted (or have its contents converted) to
a :obj:`NamedTuple <collections.namedtuple>`.
If the given type is of :obj:`list` or :obj:`tuple`, each item will be
recursively converted to a :obj:`NamedTuple <collections.namedtuple>`
provided the items can be converted. Items that cannot be converted
will still exist in the returned object.
If the given type is of :obj:`list` the return value will be a new
:obj:`list`. This means the items are not changed in the given
``obj``.
If the given type is of :obj:`Mapping <collections.abc.Mapping>`
(:obj:`dict`), keys that are valid Python identifiers will become attributes
on the returned :obj:`NamedTuple <collections.namedtuple>`. Additionally,
the attributes of the returned :obj:`NamedTuple <collections.namedtuple>`
are sorted alphabetically.
If the given type is of :obj:`OrderedDict <collections.OrderedDict>`,
the attributes of the returned :obj:`NamedTuple <collections.namedtuple>`
keep the same order as the given
:obj:`OrderedDict <collections.OrderedDict>` keys.
If the given type is of :obj:`SimpleNamespace <types.SimpleNamespace>`,
the attributes are sorted alphabetically in the returned
:obj:`NamedTuple <collections.namedtuple>`.
Any identifier (key or attribute name) that starts with an underscore
cannot be used as a :obj:`NamedTuple <collections.namedtuple>` attribute.
All values are recursively converted. This means a dictionary that
contains another dictionary, as one of its values, will be converted
to a :obj:`NamedTuple <collections.namedtuple>` with the attribute's
value also converted to a :obj:`NamedTuple <collections.namedtuple>`.
:rtype:
:obj:`list`
A list with any of its values converted to a
:obj:`NamedTuple <collections.namedtuple>`.
:obj:`tuple`
A tuple with any of its values converted to a
:obj:`NamedTuple <collections.namedtuple>`.
:obj:`NamedTuple <collections.namedtuple>`.
Example:
>>> from flutils.namedtupleutils import to_namedtuple
>>> dic = {'a': 1, 'b': 2}
>>> to_namedtuple(dic)
NamedTuple(a=1, b=2)
"""
return _to_namedtuple(obj)
| false | 0 |
|
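A quick nested example of the recursive conversion described above (assuming ``flutils`` is installed):

```python
from flutils.namedtupleutils import to_namedtuple

nt = to_namedtuple({'b': {'d': 2, 'c': 1}, 'a': 0})
print(nt.a)    # 0 -- dict keys become alphabetically sorted attributes
print(nt.b.c)  # 1 -- the inner mapping was converted recursively
```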
84 |
flutils
|
flutils.objutils
|
has_any_attrs
|
def has_any_attrs(obj: _Any, *attrs: str) -> bool:
"""Check if the given ``obj`` has **ANY** of the given ``*attrs``.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if any of the given ``*attrs`` exist on the given
``obj``;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_any_attrs
>>> has_any_attrs(dict(),'get','keys','items','values','something')
True
"""
for attr in attrs:
if hasattr(obj, attr) is True:
return True
return False
|
[
35,
57
] | true |
[
"__all__",
"_LIST_LIKE"
] |
from collections import (
UserList,
deque,
)
from collections.abc import (
Iterator,
KeysView,
ValuesView,
)
from typing import Any as _Any
__all__ = [
'has_any_attrs',
'has_any_callables',
'has_attrs',
'has_callables',
'is_list_like',
'is_subclass_of_any',
]
_LIST_LIKE = (
list,
set,
frozenset,
tuple,
deque,
Iterator,
ValuesView,
KeysView,
UserList
)
def has_any_attrs(obj: _Any, *attrs: str) -> bool:
"""Check if the given ``obj`` has **ANY** of the given ``*attrs``.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if any of the given ``*attrs`` exist on the given
``obj``;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_any_attrs
>>> has_any_attrs(dict(),'get','keys','items','values','something')
True
"""
for attr in attrs:
if hasattr(obj, attr) is True:
return True
return False
| true | 2 |
|
85 |
flutils
|
flutils.objutils
|
has_any_callables
|
def has_any_callables(obj: _Any, *attrs: str) -> bool:
"""Check if the given ``obj`` has **ANY** of the given ``attrs`` and are
callable.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if ANY of the given ``*attrs`` exist on the given ``obj``
and ANY are callable;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_any_callables
>>> has_any_callables(dict(),'get','keys','items','values','foo')
True
"""
if has_any_attrs(obj, *attrs) is True:
for attr in attrs:
if callable(getattr(obj, attr)) is True:
return True
return False
|
[
60,
84
] | true |
[
"__all__",
"_LIST_LIKE"
] |
from collections import (
UserList,
deque,
)
from collections.abc import (
Iterator,
KeysView,
ValuesView,
)
from typing import Any as _Any
__all__ = [
'has_any_attrs',
'has_any_callables',
'has_attrs',
'has_callables',
'is_list_like',
'is_subclass_of_any',
]
_LIST_LIKE = (
list,
set,
frozenset,
tuple,
deque,
Iterator,
ValuesView,
KeysView,
UserList
)
def has_any_callables(obj: _Any, *attrs: str) -> bool:
"""Check if the given ``obj`` has **ANY** of the given ``attrs`` and are
callable.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if ANY of the given ``*attrs`` exist on the given ``obj``
and ANY are callable;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_any_callables
>>> has_any_callables(dict(),'get','keys','items','values','foo')
True
"""
if has_any_attrs(obj, *attrs) is True:
for attr in attrs:
if callable(getattr(obj, attr)) is True:
return True
return False
| true | 2 |
|
86 |
flutils
|
flutils.objutils
|
has_attrs
|
def has_attrs(
obj: _Any,
*attrs: str
) -> bool:
"""Check if given ``obj`` has all the given ``*attrs``.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if all the given ``*attrs`` exist on the given ``obj``;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_attrs
>>> has_attrs(dict(),'get','keys','items','values')
True
"""
for attr in attrs:
if hasattr(obj, attr) is False:
return False
return True
|
[
87,
111
] | true |
[
"__all__",
"_LIST_LIKE"
] |
from collections import (
UserList,
deque,
)
from collections.abc import (
Iterator,
KeysView,
ValuesView,
)
from typing import Any as _Any
__all__ = [
'has_any_attrs',
'has_any_callables',
'has_attrs',
'has_callables',
'is_list_like',
'is_subclass_of_any',
]
_LIST_LIKE = (
list,
set,
frozenset,
tuple,
deque,
Iterator,
ValuesView,
KeysView,
UserList
)
def has_attrs(
obj: _Any,
*attrs: str
) -> bool:
"""Check if given ``obj`` has all the given ``*attrs``.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if all the given ``*attrs`` exist on the given ``obj``;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_attrs
>>> has_attrs(dict(),'get','keys','items','values')
True
"""
for attr in attrs:
if hasattr(obj, attr) is False:
return False
return True
| true | 2 |
|
87 |
flutils
|
flutils.objutils
|
has_callables
|
def has_callables(
obj: _Any,
*attrs: str
) -> bool:
"""Check if given ``obj`` has all the given ``attrs`` and are callable.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if all the given ``*attrs`` exist on the given ``obj``
and all are callable;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_callables
>>> has_callables(dict(),'get','keys','items','values')
True
"""
if has_attrs(obj, *attrs) is True:
for attr in attrs:
if callable(getattr(obj, attr)) is False:
return False
return True
return False
|
[
115,
142
] | true |
[
"__all__",
"_LIST_LIKE"
] |
from collections import (
UserList,
deque,
)
from collections.abc import (
Iterator,
KeysView,
ValuesView,
)
from typing import Any as _Any
__all__ = [
'has_any_attrs',
'has_any_callables',
'has_attrs',
'has_callables',
'is_list_like',
'is_subclass_of_any',
]
_LIST_LIKE = (
list,
set,
frozenset,
tuple,
deque,
Iterator,
ValuesView,
KeysView,
UserList
)
def has_callables(
obj: _Any,
*attrs: str
) -> bool:
"""Check if given ``obj`` has all the given ``attrs`` and are callable.
Args:
obj (:obj:`Any <typing.Any>`): The object to check.
*attrs (:obj:`str`): The names of the attributes to check.
:rtype:
:obj:`bool`
* :obj:`True` if all the given ``*attrs`` exist on the given ``obj``
and all are callable;
* :obj:`False` otherwise.
Example:
>>> from flutils.objutils import has_callables
>>> has_callables(dict(),'get','keys','items','values')
True
"""
if has_attrs(obj, *attrs) is True:
for attr in attrs:
if callable(getattr(obj, attr)) is False:
return False
return True
return False
| true | 2 |
|
88 |
flutils
|
flutils.packages
|
bump_version
|
def bump_version(
version: str,
position: int = 2,
pre_release: Optional[str] = None
) -> str:
"""Increase the version number from a version number string.
*New in version 0.3*
Args:
version (str): The version number to be bumped.
position (int, optional): The position (starting with zero) of the
version number component to be increased. Defaults to: ``2``
pre_release (str, optional): A value of ``a`` or ``alpha`` will
create or increase an alpha version number. A value of ``b`` or
``beta`` will create or increase a beta version number.
Raises:
ValueError: if the given ``version`` is an invalid version number.
ValueError: if the given ``position`` does not exist.
ValueError: if the given ``pre_release`` is not in:
``a, alpha, b, beta``
ValueError: if trying to bump the 'major' part of a version number to
a pre-release version.
:rtype:
:obj:`str`
* The increased version number.
Examples:
>>> from flutils.packages import bump_version
>>> bump_version('1.2.2')
'1.2.3'
>>> bump_version('1.2.3', position=1)
'1.3'
>>> bump_version('1.3.4', position=0)
'2.0'
>>> bump_version('1.2.3', pre_release='a')
'1.2.4a0'
>>> bump_version('1.2.4a0', pre_release='a')
'1.2.4a1'
>>> bump_version('1.2.4a1', pre_release='b')
'1.2.4b0'
>>> bump_version('1.2.4a1')
'1.2.4'
>>> bump_version('1.2.4b0')
'1.2.4'
>>> bump_version('2.1.3', position=1, pre_release='a')
'2.2a0'
>>> bump_version('1.2b0', position=2)
'1.2.1'
"""
ver_info = _build_version_info(version)
position = _build_version_bump_position(position)
bump_type = _build_version_bump_type(position, pre_release)
# noinspection PyUnusedLocal
hold: List[Union[int, str]] = []
if bump_type == _BUMP_VERSION_MAJOR:
hold = [ver_info.major.num + 1, 0]
elif bump_type in _BUMP_VERSION_MINORS:
if bump_type == _BUMP_VERSION_MINOR:
if ver_info.minor.pre_txt:
hold = [ver_info.major.num, ver_info.minor.num]
else:
hold = [ver_info.major.num, ver_info.minor.num + 1]
else:
if bump_type == _BUMP_VERSION_MINOR_ALPHA:
if ver_info.minor.pre_txt == 'a':
part = '%sa%s' % (
ver_info.minor.num,
ver_info.minor.pre_num + 1
)
else:
part = '{}a0'.format(ver_info.minor.num + 1)
else:
if ver_info.minor.pre_txt == 'a':
part = '{}b0'.format(ver_info.minor.num)
elif ver_info.minor.pre_txt == 'b':
part = '%sb%s' % (
ver_info.minor.num,
ver_info.minor.pre_num + 1
)
else:
part = '{}b0'.format(ver_info.minor.num + 1)
hold = [ver_info.major.num, part]
else:
if bump_type == _BUMP_VERSION_PATCH:
if ver_info.patch.pre_txt:
hold = [
ver_info.major.num,
ver_info.minor.num,
ver_info.patch.num
]
else:
hold = [
ver_info.major.num,
ver_info.minor.num,
ver_info.patch.num + 1
]
else:
if bump_type == _BUMP_VERSION_PATCH_ALPHA:
if ver_info.patch.pre_txt == 'a':
part = '%sa%s' % (
ver_info.patch.num,
ver_info.patch.pre_num + 1
)
else:
part = '{}a0'.format(ver_info.patch.num + 1)
else:
if ver_info.patch.pre_txt == 'a':
part = '{}b0'.format(ver_info.patch.num)
elif ver_info.patch.pre_txt == 'b':
part = '%sb%s' % (
ver_info.patch.num,
ver_info.patch.pre_num + 1
)
else:
part = '{}b0'.format(ver_info.patch.num + 1)
hold = [ver_info.major.num, ver_info.minor.num, part]
out = '.'.join(map(str, hold))
return out
|
[
168,
291
] | true |
[
"__all__",
"_BUMP_VERSION_MAJOR",
"_BUMP_VERSION_MINOR",
"_BUMP_VERSION_PATCH",
"_BUMP_VERSION_MINOR_ALPHA",
"_BUMP_VERSION_MINOR_BETA",
"_BUMP_VERSION_MINORS",
"_BUMP_VERSION_PATCH_ALPHA",
"_BUMP_VERSION_PATCH_BETA",
"_BUMP_VERSION_PATCHES",
"_BUMP_VERSION_POSITION_NAMES"
] |
from typing import (
Any,
Dict,
Generator,
List,
NamedTuple,
Optional,
Tuple,
Union,
cast,
)
from distutils.version import StrictVersion
__all__ = ['bump_version']
_BUMP_VERSION_MAJOR: int = 0
_BUMP_VERSION_MINOR: int = 1
_BUMP_VERSION_PATCH: int = 2
_BUMP_VERSION_MINOR_ALPHA: int = 3
_BUMP_VERSION_MINOR_BETA: int = 4
_BUMP_VERSION_MINORS: Tuple[int, ...] = (
_BUMP_VERSION_MINOR,
_BUMP_VERSION_MINOR_ALPHA,
_BUMP_VERSION_MINOR_BETA,
)
_BUMP_VERSION_PATCH_ALPHA: int = 5
_BUMP_VERSION_PATCH_BETA: int = 6
_BUMP_VERSION_PATCHES: Tuple[int, ...] = (
_BUMP_VERSION_PATCH,
_BUMP_VERSION_PATCH_ALPHA,
_BUMP_VERSION_PATCH_BETA,
)
_BUMP_VERSION_POSITION_NAMES: Dict[int, str] = {
_BUMP_VERSION_MAJOR: 'major',
_BUMP_VERSION_MINOR: 'minor',
_BUMP_VERSION_PATCH: 'patch',
}
def bump_version(
version: str,
position: int = 2,
pre_release: Optional[str] = None
) -> str:
"""Increase the version number from a version number string.
*New in version 0.3*
Args:
version (str): The version number to be bumped.
position (int, optional): The position (starting with zero) of the
version number component to be increased. Defaults to: ``2``
pre_release (str, Optional): A value of ``a`` or ``alpha`` will
create or increase an alpha version number. A value of ``b`` or
``beta`` will create or increase a beta version number.
Raises:
ValueError: if the given ``version`` is an invalid version number.
ValueError: if the given ``position`` does not exist.
ValueError: if the given ``pre_release`` is not in:
``a, alpha, b, beta``
ValueError: if trying to bump the 'major' part of a version
    number to a pre-release version.
:rtype:
:obj:`str`
* The increased version number.
Examples:
>>> from flutils.packages import bump_version
>>> bump_version('1.2.2')
'1.2.3'
>>> bump_version('1.2.3', position=1)
'1.3'
>>> bump_version('1.3.4', position=0)
'2.0'
>>> bump_version('1.2.3', pre_release='a')
'1.2.4a0'
>>> bump_version('1.2.4a0', pre_release='a')
'1.2.4a1'
>>> bump_version('1.2.4a1', pre_release='b')
'1.2.4b0'
>>> bump_version('1.2.4a1')
'1.2.4'
>>> bump_version('1.2.4b0')
'1.2.4'
>>> bump_version('2.1.3', position=1, pre_release='a')
'2.2a0'
>>> bump_version('1.2b0', position=2)
'1.2.1'
"""
ver_info = _build_version_info(version)
position = _build_version_bump_position(position)
bump_type = _build_version_bump_type(position, pre_release)
# noinspection PyUnusedLocal
hold: List[Union[int, str]] = []
if bump_type == _BUMP_VERSION_MAJOR:
hold = [ver_info.major.num + 1, 0]
elif bump_type in _BUMP_VERSION_MINORS:
if bump_type == _BUMP_VERSION_MINOR:
if ver_info.minor.pre_txt:
hold = [ver_info.major.num, ver_info.minor.num]
else:
hold = [ver_info.major.num, ver_info.minor.num + 1]
else:
if bump_type == _BUMP_VERSION_MINOR_ALPHA:
if ver_info.minor.pre_txt == 'a':
part = '%sa%s' % (
ver_info.minor.num,
ver_info.minor.pre_num + 1
)
else:
part = '{}a0'.format(ver_info.minor.num + 1)
else:
if ver_info.minor.pre_txt == 'a':
part = '{}b0'.format(ver_info.minor.num)
elif ver_info.minor.pre_txt == 'b':
part = '%sb%s' % (
ver_info.minor.num,
ver_info.minor.pre_num + 1
)
else:
part = '{}b0'.format(ver_info.minor.num + 1)
hold = [ver_info.major.num, part]
else:
if bump_type == _BUMP_VERSION_PATCH:
if ver_info.patch.pre_txt:
hold = [
ver_info.major.num,
ver_info.minor.num,
ver_info.patch.num
]
else:
hold = [
ver_info.major.num,
ver_info.minor.num,
ver_info.patch.num + 1
]
else:
if bump_type == _BUMP_VERSION_PATCH_ALPHA:
if ver_info.patch.pre_txt == 'a':
part = '%sa%s' % (
ver_info.patch.num,
ver_info.patch.pre_num + 1
)
else:
part = '{}a0'.format(ver_info.patch.num + 1)
else:
if ver_info.patch.pre_txt == 'a':
part = '{}b0'.format(ver_info.patch.num)
elif ver_info.patch.pre_txt == 'b':
part = '%sb%s' % (
ver_info.patch.num,
ver_info.patch.pre_num + 1
)
else:
part = '{}b0'.format(ver_info.patch.num + 1)
hold = [ver_info.major.num, ver_info.minor.num, part]
out = '.'.join(map(str, hold))
return out
| true | 2 |
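A minimal usage sketch for the row above (assumes flutils is installed), walking one patch release through an alpha/beta cycle with the default position (2, the patch part):

from flutils.packages import bump_version

v = '1.4.0'
v = bump_version(v, pre_release='a')  # '1.4.1a0': open a new alpha
v = bump_version(v, pre_release='a')  # '1.4.1a1': next alpha
v = bump_version(v, pre_release='b')  # '1.4.1b0': promote to beta
v = bump_version(v)                   # '1.4.1': finalize the release
print(v)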
|
89 |
flutils
|
flutils.pathutils
|
chmod
|
def chmod(
path: _PATH,
mode_file: Optional[int] = None,
mode_dir: Optional[int] = None,
include_parent: bool = False
) -> None:
"""Change the mode of a path.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does NOT exist, nothing will be done.
This function will **NOT** change the mode of:
- symlinks (symlink targets that are files or directories will be changed)
- sockets
- fifo
- block devices
- char devices
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the file or directory to have its mode changed. This
value can be a :term:`glob pattern`.
mode_file (:obj:`int`, optional): The mode applied to the given
``path`` that is a file or a symlink target that is a file.
Defaults to ``0o600``.
mode_dir (:obj:`int`, optional): The mode applied to the given
``path`` that is a directory or a symlink target that is a
directory. Defaults to ``0o700``.
include_parent (:obj:`bool`, optional): A value of :obj:`True` will
chmod the parent directory of the given ``path`` that contains
a :term:`glob pattern`. Defaults to :obj:`False`.
:rtype: :obj:`None`
Examples:
>>> from flutils.pathutils import chmod
>>> chmod('~/tmp/flutils.tests.osutils.txt', 0o660)
Supports a :term:`glob pattern`. So to recursively change the mode
of a directory just do:
>>> chmod('~/tmp/**', mode_file=0o644, mode_dir=0o770)
To change the mode of a directory's immediate contents:
>>> chmod('~/tmp/*')
"""
path = normalize_path(path)
if mode_file is None:
mode_file = 0o600
if mode_dir is None:
mode_dir = 0o700
if '*' in path.as_posix():
try:
for sub_path in Path().glob(path.as_posix()):
if sub_path.is_dir() is True:
sub_path.chmod(mode_dir)
elif sub_path.is_file():
sub_path.chmod(mode_file)
# Path().glob() returns an iterator that will
# raise NotImplementedError if there
# are no results from the glob pattern.
except NotImplementedError:
pass
else:
if include_parent is True:
parent = path.parent
if parent.is_dir():
parent.chmod(mode_dir)
else:
if path.exists() is True:
if path.is_dir():
path.chmod(mode_dir)
elif path.is_file():
path.chmod(mode_file)
|
[
50,
134
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def chmod(
path: _PATH,
mode_file: Optional[int] = None,
mode_dir: Optional[int] = None,
include_parent: bool = False
) -> None:
"""Change the mode of a path.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does NOT exist, nothing will be done.
This function will **NOT** change the mode of:
- symlinks (symlink targets that are files or directories will be changed)
- sockets
- fifo
- block devices
- char devices
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the file or directory to have its mode changed. This
value can be a :term:`glob pattern`.
mode_file (:obj:`int`, optional): The mode applied to the given
``path`` that is a file or a symlink target that is a file.
Defaults to ``0o600``.
mode_dir (:obj:`int`, optional): The mode applied to the given
``path`` that is a directory or a symlink target that is a
directory. Defaults to ``0o700``.
include_parent (:obj:`bool`, optional): A value of :obj:`True` will
chmod the parent directory of the given ``path`` that contains
a :term:`glob pattern`. Defaults to :obj:`False`.
:rtype: :obj:`None`
Examples:
>>> from flutils.pathutils import chmod
>>> chmod('~/tmp/flutils.tests.osutils.txt', 0o660)
Supports a :term:`glob pattern`. So to recursively change the mode
of a directory just do:
>>> chmod('~/tmp/**', mode_file=0o644, mode_dir=0o770)
To change the mode of a directory's immediate contents:
>>> chmod('~/tmp/*')
"""
path = normalize_path(path)
if mode_file is None:
mode_file = 0o600
if mode_dir is None:
mode_dir = 0o700
if '*' in path.as_posix():
try:
for sub_path in Path().glob(path.as_posix()):
if sub_path.is_dir() is True:
sub_path.chmod(mode_dir)
elif sub_path.is_file():
sub_path.chmod(mode_file)
# Path().glob() returns an iterator that will
# raise NotImplementedError if there
# are no results from the glob pattern.
except NotImplementedError:
pass
else:
if include_parent is True:
parent = path.parent
if parent.is_dir():
parent.chmod(mode_dir)
else:
if path.exists() is True:
if path.is_dir():
path.chmod(mode_dir)
elif path.is_file():
path.chmod(mode_file)
| true | 2 |
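A minimal usage sketch for the row above (assumes flutils is installed and that ~/scratch exists; the directory name is made up). The glob pattern drives the recursion depth:

from flutils.pathutils import chmod

# Immediate children of ~/scratch only.
chmod('~/scratch/*', mode_file=0o640, mode_dir=0o750)

# Everything below ~/scratch, via the recursive '**' pattern.
chmod('~/scratch/**', mode_file=0o600, mode_dir=0o700)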
|
90 |
flutils
|
flutils.pathutils
|
chown
|
def chown(
path: _PATH,
user: Optional[str] = None,
group: Optional[str] = None,
include_parent: bool = False
) -> None:
"""Change ownership of a path.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does NOT exist, nothing will be done.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the file or directory that will have its ownership
changed. This value can be a :term:`glob pattern`.
user (:obj:`str` or :obj:`int`, optional): The "login name" used to set
the owner of ``path``. A value of ``'-1'`` will leave the
owner unchanged. Defaults to the "login name" of the current user.
group (:obj:`str` or :obj:`int`, optional): The group name used to set
the group of ``path``. A value of ``'-1'`` will leave the
group unchanged. Defaults to the current user's group.
include_parent (:obj:`bool`, optional): A value of :obj:`True` will
chown the parent directory of the given ``path`` that contains
a :term:`glob pattern`. Defaults to :obj:`False`.
Raises:
OSError: If the given :obj:`user` does not exist as a "login
name" for this operating system.
OSError: If the given :obj:`group` does not exist as a "group
name" for this operating system.
:rtype: :obj:`None`
Examples:
>>> from flutils.pathutils import chown
>>> chown('~/tmp/flutils.tests.osutils.txt')
Supports a :term:`glob pattern`. So to recursively change the
ownership of a directory just do:
>>> chown('~/tmp/**')
To change ownership of all the directory's immediate contents:
>>> chown('~/tmp/*', user='foo', group='bar')
"""
path = normalize_path(path)
if isinstance(user, str) and user == '-1':
uid = -1
else:
uid = get_os_user(user).pw_uid
if isinstance(group, str) and group == '-1':
gid = -1
else:
gid = get_os_group(group).gr_gid
if '*' in path.as_posix():
try:
for sub_path in Path().glob(path.as_posix()):
if sub_path.is_dir() or sub_path.is_file():
os.chown(sub_path.as_posix(), uid, gid)
except NotImplementedError:
# Path().glob() returns an iterator that will
# raise NotImplementedError if there
# are no results from the glob pattern.
pass
else:
if include_parent is True:
path = path.parent
if path.is_dir() is True:
os.chown(path.as_posix(), uid, gid)
else:
if path.exists() is True:
os.chown(path.as_posix(), uid, gid)
|
[
137,
215
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def chown(
path: _PATH,
user: Optional[str] = None,
group: Optional[str] = None,
include_parent: bool = False
) -> None:
"""Change ownership of a path.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does NOT exist, nothing will be done.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the file or directory that will have its ownership
changed. This value can be a :term:`glob pattern`.
user (:obj:`str` or :obj:`int`, optional): The "login name" used to set
the owner of ``path``. A value of ``'-1'`` will leave the
owner unchanged. Defaults to the "login name" of the current user.
group (:obj:`str` or :obj:`int`, optional): The group name used to set
the group of ``path``. A value of ``'-1'`` will leave the
group unchanged. Defaults to the current user's group.
include_parent (:obj:`bool`, optional): A value of :obj:`True` will
chown the parent directory of the given ``path`` that contains
a :term:`glob pattern`. Defaults to :obj:`False`.
Raises:
OSError: If the given :obj:`user` does not exist as a "login
name" for this operating system.
OSError: If the given :obj:`group` does not exist as a "group
name" for this operating system.
:rtype: :obj:`None`
Examples:
>>> from flutils.pathutils import chown
>>> chown('~/tmp/flutils.tests.osutils.txt')
Supports a :term:`glob pattern`. So to recursively change the
ownership of a directory just do:
>>> chown('~/tmp/**')
To change ownership of all the directory's immediate contents:
>>> chown('~/tmp/*', user='foo', group='bar')
"""
path = normalize_path(path)
if isinstance(user, str) and user == '-1':
uid = -1
else:
uid = get_os_user(user).pw_uid
if isinstance(group, str) and group == '-1':
gid = -1
else:
gid = get_os_group(group).gr_gid
if '*' in path.as_posix():
try:
for sub_path in Path().glob(path.as_posix()):
if sub_path.is_dir() or sub_path.is_file():
os.chown(sub_path.as_posix(), uid, gid)
except NotImplementedError:
# Path().glob() returns an iterator that will
# raise NotImplementedError if there
# are no results from the glob pattern.
pass
else:
if include_parent is True:
path = path.parent
if path.is_dir() is True:
os.chown(path.as_posix(), uid, gid)
else:
if path.exists() is True:
os.chown(path.as_posix(), uid, gid)
| true | 2 |
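A minimal usage sketch for the row above (assumes flutils is installed and that a 'deploy' user and a 'web' group exist on this system; both names are made up):

from flutils.pathutils import chown

# Recursively hand the tree to deploy:web.
chown('~/site/**', user='deploy', group='web')

# Change only the group; '-1' leaves the owner untouched.
chown('~/site/config.ini', user='-1', group='web')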
|
91 |
flutils
|
flutils.pathutils
|
directory_present
|
def directory_present(
path: _PATH,
mode: Optional[int] = None,
user: Optional[str] = None,
group: Optional[str] = None,
) -> Path:
"""Ensure the state of the given :obj:`path` is present and a directory.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does **NOT** exist, it will be created as a
directory.
If the parent paths of the given ``path`` do not exist, they will also be
created with the ``mode``, ``user`` and ``group``.
If the given ``path`` does exist as a directory, the ``mode``, ``user``,
and ``group`` will be applied.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the directory.
mode (:obj:`int`, optional): The mode applied to the ``path``.
Defaults to ``0o700``.
user (:obj:`str` or :obj:`int`, optional): The "login name" used to
set the owner of the given ``path``. A value of ``'-1'`` will
leave the owner unchanged. Defaults to the "login name" of the
current user.
group (:obj:`str` or :obj:`int`, optional): The group name used to set
the group of the given ``path``. A value of ``'-1'`` will leave
the group unchanged. Defaults to the current user's group.
Raises:
ValueError: if the given ``path`` contains a glob pattern.
ValueError: if the given ``path`` is not an absolute path.
FileExistsError: if the given ``path`` exists and is not a directory.
FileExistsError: if a parent of the given ``path`` exists and is
not a directory.
:rtype: :obj:`Path <pathlib.Path>`
* :obj:`PosixPath <pathlib.PosixPath>` or
:obj:`WindowsPath <pathlib.WindowsPath>` depending on the system.
.. Note:: :obj:`Path <pathlib.Path>` objects are immutable. Therefore,
any given ``path`` of type :obj:`Path <pathlib.Path>` will not be
the same object returned.
Example:
>>> from flutils.pathutils import directory_present
>>> directory_present('~/tmp/test_path')
PosixPath('/Users/len/tmp/test_path')
"""
path = normalize_path(path)
if '*' in path.as_posix():
raise ValueError(
'The path: %r must NOT contain any glob patterns.'
% path.as_posix()
)
if path.is_absolute() is False:
raise ValueError(
'The path: %r must be an absolute path. A path is considered '
'absolute if it has both a root and (if the flavour allows) a '
'drive.'
% path.as_posix()
)
# Create a queue of paths to be created as directories.
paths: Deque = deque()
path_exists_as = exists_as(path)
if path_exists_as == '':
paths.append(path)
elif path_exists_as != 'directory':
raise FileExistsError(
'The path: %r can NOT be created as a directory because it '
'already exists as a %s.' % (path.as_posix(), path_exists_as)
)
parent = path.parent
child = path
# Traverse the path backwards and add any directories that
# do not exist to the path queue.
while child.as_posix() != parent.as_posix():
parent_exists_as = exists_as(parent)
if parent_exists_as == '':
paths.appendleft(parent)
child = parent
parent = parent.parent
elif parent_exists_as == 'directory':
break
else:
raise FileExistsError(
'Unable to create the directory: %r because the '
'parent path: %r exists as a %s.'
% (path.as_posix(), parent.as_posix(), parent_exists_as)
)
if mode is None:
mode = 0o700
if paths:
for build_path in paths:
build_path.mkdir(mode=mode)
chown(build_path, user=user, group=group)
else:
# The given path already existed; just apply the mode and ownership.
chmod(path, mode_dir=mode)
chown(path, user=user, group=group)
return path
|
[
218,
332
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def directory_present(
path: _PATH,
mode: Optional[int] = None,
user: Optional[str] = None,
group: Optional[str] = None,
) -> Path:
"""Ensure the state of the given :obj:`path` is present and a directory.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
If the given ``path`` does **NOT** exist, it will be created as a
directory.
If the parent paths of the given ``path`` do not exist, they will also be
created with the ``mode``, ``user`` and ``group``.
If the given ``path`` does exist as a directory, the ``mode``, ``user``,
and ``group`` will be applied.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path of the directory.
mode (:obj:`int`, optional): The mode applied to the ``path``.
Defaults to ``0o700``.
user (:obj:`str` or :obj:`int`, optional): The "login name" used to
set the owner of the given ``path``. A value of ``'-1'`` will
leave the owner unchanged. Defaults to the "login name" of the
current user.
group (:obj:`str` or :obj:`int`, optional): The group name used to set
the group of the given ``path``. A value of ``'-1'`` will leave
the group unchanged. Defaults to the current user's group.
Raises:
ValueError: if the given ``path`` contains a glob pattern.
ValueError: if the given ``path`` is not an absolute path.
FileExistsError: if the given ``path`` exists and is not a directory.
FileExistsError: if a parent of the given ``path`` exists and is
not a directory.
:rtype: :obj:`Path <pathlib.Path>`
* :obj:`PosixPath <pathlib.PosixPath>` or
:obj:`WindowsPath <pathlib.WindowsPath>` depending on the system.
.. Note:: :obj:`Path <pathlib.Path>` objects are immutable. Therefore,
any given ``path`` of type :obj:`Path <pathlib.Path>` will not be
the same object returned.
Example:
>>> from flutils.pathutils import directory_present
>>> directory_present('~/tmp/test_path')
PosixPath('/Users/len/tmp/test_path')
"""
path = normalize_path(path)
if '*' in path.as_posix():
raise ValueError(
'The path: %r must NOT contain any glob patterns.'
% path.as_posix()
)
if path.is_absolute() is False:
raise ValueError(
'The path: %r must be an absolute path. A path is considered '
'absolute if it has both a root and (if the flavour allows) a '
'drive.'
% path.as_posix()
)
# Create a queue of paths to be created as directories.
paths: Deque = deque()
path_exists_as = exists_as(path)
if path_exists_as == '':
paths.append(path)
elif path_exists_as != 'directory':
raise FileExistsError(
'The path: %r can NOT be created as a directory because it '
'already exists as a %s.' % (path.as_posix(), path_exists_as)
)
parent = path.parent
child = path
# Traverse the path backwards and add any directories that
# do not exist to the path queue.
while child.as_posix() != parent.as_posix():
parent_exists_as = exists_as(parent)
if parent_exists_as == '':
paths.appendleft(parent)
child = parent
parent = parent.parent
elif parent_exists_as == 'directory':
break
else:
raise FileExistsError(
'Unable to create the directory: %r because the '
'parent path: %r exists as a %s.'
% (path.as_posix(), parent.as_posix(), parent_exists_as)
)
if mode is None:
mode = 0o700
if paths:
for build_path in paths:
build_path.mkdir(mode=mode)
chown(build_path, user=user, group=group)
else:
# The given path already existed; just apply the mode and ownership.
chmod(path, mode_dir=mode)
chown(path, user=user, group=group)
return path
| true | 2 |
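A minimal usage sketch for the row above (assumes flutils is installed; the directory names are made up). Missing parents are created with the same mode, user and group:

from flutils.pathutils import directory_present

cache = directory_present('~/.cache/myapp/blobs', mode=0o750)
print(cache)  # e.g. PosixPath('/home/me/.cache/myapp/blobs')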
|
92 |
flutils
|
flutils.pathutils
|
exists_as
|
def exists_as(path: _PATH) -> str:
"""Return a string describing the file type if it exists.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to check for existence.
:rtype:
:obj:`str`
* ``''`` (empty string): if the given ``path`` does NOT exist; or,
is a broken symbolic link; or, other errors (such as permission
errors) are propagated.
* ``'directory'``: if the given ``path`` points to a directory or
is a symbolic link pointing to a directory.
* ``'file'``: if the given ``path`` points to a regular file or is a
symbolic link pointing to a regular file.
* ``'block device'``: if the given ``path`` points to a block device or
is a symbolic link pointing to a block device.
* ``'char device'``: if the given ``path`` points to a character device
or is a symbolic link pointing to a character device.
* ``'FIFO'``: if the given ``path`` points to a FIFO or is a symbolic
link pointing to a FIFO.
* ``'socket'``: if the given ``path`` points to a Unix socket or is a
symbolic link pointing to a Unix socket.
Example:
>>> from flutils.pathutils import exists_as
>>> exists_as('~/tmp')
'directory'
"""
path = normalize_path(path)
if path.is_dir():
return 'directory'
if path.is_file():
return 'file'
if path.is_block_device():
return 'block device'
if path.is_char_device():
return 'char device'
if path.is_fifo():
return 'FIFO'
if path.is_socket():
return 'socket'
return ''
|
[
335,
383
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def exists_as(path: _PATH) -> str:
"""Return a string describing the file type if it exists.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to check for existence.
:rtype:
:obj:`str`
* ``''`` (empty string): if the given ``path`` does NOT exist; or,
is a broken symbolic link; or, other errors (such as permission
errors) are propagated.
* ``'directory'``: if the given ``path`` points to a directory or
is a symbolic link pointing to a directory.
* ``'file'``: if the given ``path`` points to a regular file or is a
symbolic link pointing to a regular file.
* ``'block device'``: if the given ``path`` points to a block device or
is a symbolic link pointing to a block device.
* ``'char device'``: if the given ``path`` points to a character device
or is a symbolic link pointing to a character device.
* ``'FIFO'``: if the given ``path`` points to a FIFO or is a symbolic
link pointing to a FIFO.
* ``'socket'``: if the given ``path`` points to a Unix socket or is a
symbolic link pointing to a Unix socket.
Example:
>>> from flutils.pathutils import exists_as
>>> exists_as('~/tmp')
'directory'
"""
path = normalize_path(path)
if path.is_dir():
return 'directory'
if path.is_file():
return 'file'
if path.is_block_device():
return 'block device'
if path.is_char_device():
return 'char device'
if path.is_fifo():
return 'FIFO'
if path.is_socket():
return 'socket'
return ''
| true | 2 |
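A minimal usage sketch for the row above (assumes flutils is installed): branch on the type string instead of calling several Path.is_*() methods yourself.

from flutils.pathutils import exists_as

kind = exists_as('~/tmp/report.txt')
if kind == '':
    print('missing (or a broken symlink)')
elif kind in ('file', 'directory'):
    print(f'ordinary {kind}')
else:
    print(f'special path type: {kind}')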
|
93 |
flutils
|
flutils.pathutils
|
find_paths
|
def find_paths(
pattern: _PATH
) -> Generator[Path, None, None]:
"""Find all paths that match the given :term:`glob pattern`.
This function pre-processes the given ``pattern`` with
:obj:`~flutils.normalize_path`.
Args:
pattern (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to find; which may contain a :term:`glob pattern`.
:rtype:
:obj:`Generator <typing.Generator>`
Yields:
:obj:`pathlib.PosixPath` or :obj:`pathlib.WindowsPath`
Example:
>>> from flutils.pathutils import find_paths
>>> list(find_paths('~/tmp/*'))
[PosixPath('/home/test_user/tmp/file_one'),
PosixPath('/home/test_user/tmp/dir_one')]
"""
pattern = normalize_path(pattern)
search = pattern.as_posix()[len(pattern.anchor):]
yield from Path(pattern.anchor).glob(search)
|
[
386,
413
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def find_paths(
pattern: _PATH
) -> Generator[Path, None, None]:
"""Find all paths that match the given :term:`glob pattern`.
This function pre-processes the given ``pattern`` with
:obj:`~flutils.normalize_path`.
Args:
pattern (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to find; which may contain a :term:`glob pattern`.
:rtype:
:obj:`Generator <typing.Generator>`
Yields:
:obj:`pathlib.PosixPath` or :obj:`pathlib.WindowsPath`
Example:
>>> from flutils.pathutils import find_paths
>>> list(find_paths('~/tmp/*'))
[PosixPath('/home/test_user/tmp/file_one'),
PosixPath('/home/test_user/tmp/dir_one')]
"""
pattern = normalize_path(pattern)
search = pattern.as_posix()[len(pattern.anchor):]
yield from Path(pattern.anchor).glob(search)
| false | 0 |
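A minimal usage sketch for the row above (assumes flutils is installed): keep only the regular files from a recursive match. The '~/logs' layout is made up.

from flutils.pathutils import find_paths

log_files = [p for p in find_paths('~/logs/**/*.log') if p.is_file()]
for p in sorted(log_files):
    print(p)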
|
94 |
flutils
|
flutils.pathutils
|
get_os_group
|
def get_os_group(name: _STR_OR_INT_OR_NONE = None) -> grp.struct_group:
"""Get an operating system group object.
Args:
name (:obj:`str` or :obj:`int`, optional): The "group name" or ``gid``.
Defaults to the current user's group.
Raises:
OSError: If the given ``name`` does not exist as a "group
name" for this operating system.
OSError: If the given ``name`` is a ``gid`` and it does not
exist.
:rtype:
:obj:`struct_group <grp>`
* A tuple like object.
Example:
>>> from flutils.pathutils import get_os_group
>>> get_os_group('bar')
grp.struct_group(gr_name='bar', gr_passwd='*', gr_gid=2001,
gr_mem=['foo'])
"""
if name is None:
name = get_os_user().pw_gid
name = cast(int, name)
if isinstance(name, int):
try:
return grp.getgrgid(name)
except KeyError:
raise OSError(
'The given gid: %r, is not a valid gid for this operating '
'system.' % name
)
try:
return grp.getgrnam(name)
except KeyError:
raise OSError(
'The given name: %r, is not a valid "group name" '
'for this operating system.' % name
)
|
[
416,
454
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def get_os_group(name: _STR_OR_INT_OR_NONE = None) -> grp.struct_group:
"""Get an operating system group object.
Args:
name (:obj:`str` or :obj:`int`, optional): The "group name" or ``gid``.
Defaults to the current user's group.
Raises:
OSError: If the given ``name`` does not exist as a "group
name" for this operating system.
OSError: If the given ``name`` is a ``gid`` and it does not
exist.
:rtype:
:obj:`struct_group <grp>`
* A tuple like object.
Example:
>>> from flutils.pathutils import get_os_group
>>> get_os_group('bar')
grp.struct_group(gr_name='bar', gr_passwd='*', gr_gid=2001,
gr_mem=['foo'])
"""
if name is None:
name = get_os_user().pw_gid
name = cast(int, name)
if isinstance(name, int):
try:
return grp.getgrgid(name)
except KeyError:
raise OSError(
'The given gid: %r, is not a valid gid for this operating '
'system.' % name
)
try:
return grp.getgrnam(name)
except KeyError:
raise OSError(
'The given name: %r, is not a valid "group name" '
'for this operating system.' % name
)
| true | 2 |
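A minimal usage sketch for the row above (assumes flutils is installed): resolve the current user's group, then show the OSError raised for an unknown gid.

from flutils.pathutils import get_os_group

group = get_os_group()
print(group.gr_name, group.gr_gid)

try:
    get_os_group(999999)  # assumed to be an unused gid on this system
except OSError as exc:
    print(exc)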
|
95 |
flutils
|
flutils.pathutils
|
get_os_user
|
def get_os_user(name: _STR_OR_INT_OR_NONE = None) -> pwd.struct_passwd:
"""Return an user object representing an operating system user.
Args:
name (:obj:`str` or :obj:`int`, optional): The "login name" or
``uid``. Defaults to the current user's "login name".
Raises:
OSError: If the given ``name`` does not exist as a "login
name" for this operating system.
OSError: If the given ``name`` is a ``uid`` and it does not
exist.
:rtype:
:obj:`struct_passwd <pwd>`
* A tuple like object.
Example:
>>> from flutils.pathutils import get_os_user
>>> get_os_user('foo')
pwd.struct_passwd(pw_name='foo', pw_passwd='********', pw_uid=1001,
pw_gid=2001, pw_gecos='Foo Bar', pw_dir='/home/foo',
pw_shell='/usr/local/bin/bash')
"""
if isinstance(name, int):
try:
return pwd.getpwuid(name)
except KeyError:
raise OSError(
'The given uid: %r, is not a valid uid for this operating '
'system.' % name
)
if name is None:
name = getpass.getuser()
try:
return pwd.getpwnam(name)
except KeyError:
raise OSError(
'The given name: %r, is not a valid "login name" '
'for this operating system.' % name
)
|
[
460,
497
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def get_os_user(name: _STR_OR_INT_OR_NONE = None) -> pwd.struct_passwd:
"""Return an user object representing an operating system user.
Args:
name (:obj:`str` or :obj:`int`, optional): The "login name" or
``uid``. Defaults to the current user's "login name".
Raises:
OSError: If the given ``name`` does not exist as a "login
name" for this operating system.
OSError: If the given ``name`` is a ``uid`` and it does not
exist.
:rtype:
:obj:`struct_passwd <pwd>`
* A tuple like object.
Example:
>>> from flutils.pathutils import get_os_user
>>> get_os_user('foo')
pwd.struct_passwd(pw_name='foo', pw_passwd='********', pw_uid=1001,
pw_gid=2001, pw_gecos='Foo Bar', pw_dir='/home/foo',
pw_shell='/usr/local/bin/bash')
"""
if isinstance(name, int):
try:
return pwd.getpwuid(name)
except KeyError:
raise OSError(
'The given uid: %r, is not a valid uid for this operating '
'system.' % name
)
if name is None:
name = getpass.getuser()
try:
return pwd.getpwnam(name)
except KeyError:
raise OSError(
'The given name: %r, is not a valid "login name" '
'for this operating system.' % name
)
| true | 2 |
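A minimal usage sketch for the row above (assumes flutils is installed): the current account by default, or an explicit uid (0 exists on typical Unix systems).

from flutils.pathutils import get_os_user

me = get_os_user()
print(me.pw_name, me.pw_uid, me.pw_dir)
print(get_os_user(0).pw_name)  # usually 'root'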
|
96 |
flutils
|
flutils.pathutils
|
path_absent
|
def path_absent(
path: _PATH,
) -> None:
"""Ensure the given ``path`` does **NOT** exist.
*New in version 0.4.*
If the given ``path`` does exist, it will be deleted.
If the given ``path`` is a directory, this function will
recursively delete all of the directory's contents.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to remove.
:rtype: :obj:`None`
Example:
>>> from flutils.pathutils import path_absent
>>> path_absent('~/tmp/test_path')
"""
path = normalize_path(path)
path = path.as_posix()
path = cast(str, path)
if os.path.exists(path):
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
p = os.path.join(root, name)
if os.path.isfile(p) or os.path.islink(p):
os.unlink(p)
for name in dirs:
p = os.path.join(root, name)
if os.path.islink(p):
os.unlink(p)
else:
os.rmdir(p)
if os.path.isdir(path):
os.rmdir(path)
else:
os.unlink(path)
|
[
573,
620
] | true |
[
"__all__",
"_PATH",
"_STR_OR_INT_OR_NONE"
] |
import functools
import getpass
import grp
import os
import pwd
import sys
from collections import deque
from os import PathLike
from pathlib import (
Path,
PosixPath,
WindowsPath,
)
from typing import (
Deque,
Generator,
Optional,
Union,
cast,
)
__all__ = [
'chmod',
'chown',
'directory_present',
'exists_as',
'find_paths',
'get_os_group',
'get_os_user',
'normalize_path',
'path_absent',
]
_PATH = Union[
PathLike,
PosixPath,
WindowsPath,
bytes,
str,
]
_STR_OR_INT_OR_NONE = Union[
str,
int,
None
]
def path_absent(
path: _PATH,
) -> None:
"""Ensure the given ``path`` does **NOT** exist.
*New in version 0.4.*
If the given ``path`` does exist, it will be deleted.
If the given ``path`` is a directory, this function will
recursively delete all of the directory's contents.
This function processes the given ``path`` with
:obj:`~flutils.normalize_path`.
Args:
path (:obj:`str`, :obj:`bytes` or :obj:`Path <pathlib.Path>`):
The path to remove.
:rtype: :obj:`None`
Example:
>>> from flutils.pathutils import path_absent
>>> path_absent('~/tmp/test_path')
"""
path = normalize_path(path)
path = path.as_posix()
path = cast(str, path)
if os.path.exists(path):
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
p = os.path.join(root, name)
if os.path.isfile(p) or os.path.islink(p):
os.unlink(p)
for name in dirs:
p = os.path.join(root, name)
if os.path.islink(p):
os.unlink(p)
else:
os.rmdir(p)
if os.path.isdir(path):
os.rmdir(path)
else:
os.unlink(path)
| true | 2 |
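A minimal usage sketch for the row above (assumes flutils is installed): build a scratch tree, remove it, and note that a second call is a harmless no-op.

from flutils.pathutils import directory_present, path_absent

tmp = directory_present('~/tmp/fixture')
(tmp / 'data.txt').write_text('scratch')
path_absent('~/tmp/fixture')  # unlinks the file, then removes the directory
path_absent('~/tmp/fixture')  # already absent: does nothing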
|
97 |
flutils
|
flutils.setuputils.cfg
|
each_sub_command_config
|
def each_sub_command_config(
setup_dir: Optional[Union[os.PathLike, str]] = None
) -> Generator[SetupCfgCommandConfig, None, None]:
format_kwargs: Dict[str, str] = {
'setup_dir': _prep_setup_dir(setup_dir),
'home': os.path.expanduser('~')
}
setup_cfg_path = os.path.join(format_kwargs['setup_dir'], 'setup.cfg')
parser = ConfigParser()
parser.read(setup_cfg_path)
format_kwargs['name'] = _get_name(parser, setup_cfg_path)
path = os.path.join(format_kwargs['setup_dir'], 'setup_commands.cfg')
if os.path.isfile(path):
parser = ConfigParser()
parser.read(path)
yield from _each_setup_cfg_command(parser, format_kwargs)
|
[
156,
171
] | true |
[] |
import os
from configparser import (
ConfigParser,
NoOptionError,
NoSectionError,
)
from traceback import (
FrameSummary,
extract_stack,
)
from typing import (
Dict,
Generator,
List,
NamedTuple,
Optional,
Tuple,
Union,
cast,
)
from flutils.strutils import underscore_to_camel
def each_sub_command_config(
setup_dir: Optional[Union[os.PathLike, str]] = None
) -> Generator[SetupCfgCommandConfig, None, None]:
format_kwargs: Dict[str, str] = {
'setup_dir': _prep_setup_dir(setup_dir),
'home': os.path.expanduser('~')
}
setup_cfg_path = os.path.join(format_kwargs['setup_dir'], 'setup.cfg')
parser = ConfigParser()
parser.read(setup_cfg_path)
format_kwargs['name'] = _get_name(parser, setup_cfg_path)
path = os.path.join(format_kwargs['setup_dir'], 'setup_commands.cfg')
if os.path.isfile(path):
parser = ConfigParser()
parser.read(path)
yield from _each_setup_cfg_command(parser, format_kwargs)
| true | 2 |
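A minimal usage sketch for the row above (assumes flutils is installed and that '/path/to/project' contains a setup.cfg with a [metadata] name plus an optional setup_commands.cfg; the path is a placeholder):

from flutils.setuputils.cfg import each_sub_command_config

for cmd_cfg in each_sub_command_config('/path/to/project'):
    print(cmd_cfg)  # one SetupCfgCommandConfig per configured command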
|
98 |
flutils
|
flutils.txtutils
|
len_without_ansi
|
def len_without_ansi(seq: Sequence) -> int:
"""Return the character length of the given
:obj:`Sequence <typing.Sequence>` without counting any ANSI codes.
*New in version 0.6*
Args:
seq (:obj:`Sequence <typing.Sequence>`): A string or a list/tuple
of strings.
:rtype:
:obj:`int`
Example:
>>> from flutils.txtutils import len_without_ansi
>>> text = '\\x1b[38;5;209mfoobar\\x1b[0m'
>>> len_without_ansi(text)
6
"""
if hasattr(seq, 'capitalize'):
_text: str = cast(str, seq)
seq = [c for c in _ANSI_RE.split(_text) if c]
seq = [c for c in chain(*map(_ANSI_RE.split, seq)) if c]
seq = cast(Sequence[str], seq)
out = 0
for text in seq:
if hasattr(text, 'capitalize'):
if text.startswith('\x1b[') and text.endswith('m'):
continue
else:
out += len(text)
return out
|
[
24,
55
] | true |
[
"__all__",
"_ANSI_RE"
] |
import re
from itertools import chain
from sys import hexversion
from textwrap import TextWrapper
from typing import (
List,
Optional,
Sequence,
cast,
)
__all__ = ['len_without_ansi', 'AnsiTextWrapper']
_ANSI_RE = re.compile('(\x1b\\[[0-9;:]+[ABCDEFGHJKSTfhilmns])')
def len_without_ansi(seq: Sequence) -> int:
"""Return the character length of the given
:obj:`Sequence <typing.Sequence>` without counting any ANSI codes.
*New in version 0.6*
Args:
seq (:obj:`Sequence <typing.Sequence>`): A string or a list/tuple
of strings.
:rtype:
:obj:`int`
Example:
>>> from flutils.txtutils import len_without_ansi
>>> text = '\\x1b[38;5;209mfoobar\\x1b[0m'
>>> len_without_ansi(text)
6
"""
if hasattr(seq, 'capitalize'):
_text: str = cast(str, seq)
seq = [c for c in _ANSI_RE.split(_text) if c]
seq = [c for c in chain(*map(_ANSI_RE.split, seq)) if c]
seq = cast(Sequence[str], seq)
out = 0
for text in seq:
if hasattr(text, 'capitalize'):
if text.startswith('\x1b[') and text.endswith('m'):
continue
else:
out += len(text)
return out
| true | 2 |
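A minimal usage sketch for the row above (assumes flutils is installed): raw len() counts the escape codes, while len_without_ansi() counts only the visible characters.

from flutils.txtutils import len_without_ansi

colored = '\x1b[38;5;209mfoobar\x1b[0m'
print(len(colored))                        # 21: includes the ANSI codes
print(len_without_ansi(colored))           # 6: just 'foobar'
print(len_without_ansi(['foo', colored]))  # 9: sequences work too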
|
99 |
httpie
|
httpie.cli.argparser
|
HTTPieArgumentParser
|
parse_args
|
def parse_args(
self,
env: Environment,
args=None,
namespace=None
) -> argparse.Namespace:
self.env = env
self.args, no_options = super().parse_known_args(args, namespace)
if self.args.debug:
self.args.traceback = True
self.has_stdin_data = (
self.env.stdin
and not self.args.ignore_stdin
and not self.env.stdin_isatty
)
# Arguments processing and environment setup.
self._apply_no_options(no_options)
self._process_request_type()
self._process_download_options()
self._setup_standard_streams()
self._process_output_options()
self._process_pretty_options()
self._process_format_options()
self._guess_method()
self._parse_items()
if self.has_stdin_data:
self._body_from_file(self.env.stdin)
self._process_url()
self._process_auth()
if self.args.compress:
# TODO: allow --compress with --chunked / --multipart
if self.args.chunked:
self.error('cannot combine --compress and --chunked')
if self.args.multipart:
self.error('cannot combine --compress and --multipart')
return self.args
|
[
68,
105
] | false |
[] |
import argparse
import errno
import os
import re
import sys
from argparse import RawDescriptionHelpFormatter
from textwrap import dedent
from urllib.parse import urlsplit
from requests.utils import get_netrc_auth
from httpie.cli.argtypes import (
AuthCredentials, KeyValueArgType, PARSED_DEFAULT_FORMAT_OPTIONS,
parse_auth,
parse_format_options,
)
from httpie.cli.constants import (
HTTP_GET, HTTP_POST, OUTPUT_OPTIONS, OUTPUT_OPTIONS_DEFAULT,
OUTPUT_OPTIONS_DEFAULT_OFFLINE, OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED,
OUT_RESP_BODY, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY, RequestType,
SEPARATOR_CREDENTIALS,
SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_GROUP_DATA_ITEMS, URL_SCHEME_RE,
)
from httpie.cli.exceptions import ParseError
from httpie.cli.requestitems import RequestItems
from httpie.context import Environment
from httpie.plugins.registry import plugin_manager
from httpie.utils import ExplicitNullAuth, get_content_type
class HTTPieArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, formatter_class=HTTPieHelpFormatter, **kwargs):
kwargs['add_help'] = False
super().__init__(*args, formatter_class=formatter_class, **kwargs)
self.env = None
self.args = None
self.has_stdin_data = False
def parse_args(
self,
env: Environment,
args=None,
namespace=None
) -> argparse.Namespace:
self.env = env
self.args, no_options = super().parse_known_args(args, namespace)
if self.args.debug:
self.args.traceback = True
self.has_stdin_data = (
self.env.stdin
and not self.args.ignore_stdin
and not self.env.stdin_isatty
)
# Arguments processing and environment setup.
self._apply_no_options(no_options)
self._process_request_type()
self._process_download_options()
self._setup_standard_streams()
self._process_output_options()
self._process_pretty_options()
self._process_format_options()
self._guess_method()
self._parse_items()
if self.has_stdin_data:
self._body_from_file(self.env.stdin)
self._process_url()
self._process_auth()
if self.args.compress:
# TODO: allow --compress with --chunked / --multipart
if self.args.chunked:
self.error('cannot combine --compress and --chunked')
if self.args.multipart:
self.error('cannot combine --compress and --multipart')
return self.args
| true | 2 |
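A minimal usage sketch for the row above (assumes HTTPie ~2.x, where a fully configured parser instance is exposed as httpie.cli.definitions.parser). '--ignore-stdin' keeps parse_args() from trying to read a request body from stdin:

from httpie.cli.definitions import parser
from httpie.context import Environment

args = parser.parse_args(
    env=Environment(),
    args=['--ignore-stdin', 'GET', 'example.org'],
)
print(args.method, args.url)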