Dataset Viewer

Column schema and per-column statistics, as reported by the viewer:

| Column | Type | Stats |
|---|---|---|
| `prompt_id` | int64 | 0–941 |
| `project` | string | 24 classes |
| `module` | string | 7–49 chars |
| `class` | string | 0–32 chars |
| `method` | string | 2–37 chars |
| `focal_method_txt` | string | 43–41.5k chars |
| `focal_method_lines` | list | always length 2 (start and end line) |
| `in_stack` | bool | 2 classes |
| `globals` | list | 0–16 items |
| `type_context` | string | 79–41.9k chars |
| `has_branch` | bool | 2 classes |
| `total_branches` | int64 | 0–3 |
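To make the schema concrete, here is a minimal sketch of loading and inspecting a dataset with these columns via the Hugging Face `datasets` library; the repository path `user/focal-methods` is a placeholder, not the dataset's actual name.

```python
from datasets import load_dataset

# Placeholder path; substitute the real dataset repository.
ds = load_dataset("user/focal-methods", split="train")

row = ds[0]
print(row["project"], row["module"], row["method"])  # e.g. apimd apimd.loader walk_packages
print(row["focal_method_lines"])                     # [start, end] line span of the focal method
print(row["has_branch"], row["total_branches"])      # branch metadata for the focal method

# Keep only the methods that contain branching logic.
branchy = ds.filter(lambda r: r["has_branch"])
```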
**Row 0** · project: `apimd` · module: `apimd.loader` · method: `walk_packages`

focal_method_txt:

```python
def walk_packages(name: str, path: str) -> Iterator[tuple[str, str]]:
"""Walk packages without import them."""
path = abspath(path) + sep
valid = (path + name, path + name + PEP561_SUFFIX)
for root, _, fs in walk(path):
for f in fs:
if not f.endswith(('.py', '.pyi')):
continue
f_path = parent(join(root, f))
if not f_path.startswith(valid):
continue
name = (f_path
.removeprefix(path)
.replace(PEP561_SUFFIX, "")
.replace(sep, '.')
.removesuffix('.__init__'))
yield name, f_path
```

focal_method_lines: `[43, 59]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "PEP561_SUFFIX"]`

type_context:

```python
from typing import Optional
from collections.abc import Sequence, Iterator
from sys import path as sys_path
from os import mkdir, walk
from os.path import isdir, isfile, abspath, join, sep, dirname
from importlib.abc import Loader
from importlib.machinery import EXTENSION_SUFFIXES
from importlib.util import find_spec, spec_from_file_location, module_from_spec
from .logger import logger
from .parser import parent, Parser
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
PEP561_SUFFIX = '-stubs'
def walk_packages(name: str, path: str) -> Iterator[tuple[str, str]]:
"""Walk packages without import them."""
path = abspath(path) + sep
valid = (path + name, path + name + PEP561_SUFFIX)
for root, _, fs in walk(path):
for f in fs:
if not f.endswith(('.py', '.pyi')):
continue
f_path = parent(join(root, f))
if not f_path.startswith(valid):
continue
name = (f_path
.removeprefix(path)
.replace(PEP561_SUFFIX, "")
.replace(sep, '.')
.removesuffix('.__init__'))
yield name, f_path
```

has_branch: `true` · total_branches: `2`
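As a usage sketch for the row above (the package name `mypkg` and the working directory are hypothetical), the generator yields dotted module names paired with suffix-less file paths, without importing anything:

```python
from apimd.loader import walk_packages

# Scans ./mypkg (or ./mypkg-stubs) for .py/.pyi files.
for name, path in walk_packages("mypkg", "."):
    print(name, "->", path)  # e.g. mypkg.loader -> /abs/cwd/mypkg/loader
```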
**Row 1** · project: `apimd` · module: `apimd.loader` · method: `loader`

focal_method_txt:

```python
def loader(root: str, pwd: str, link: bool, level: int, toc: bool) -> str:
"""Package searching algorithm."""
p = Parser.new(link, level, toc)
for name, path in walk_packages(root, pwd):
# Load its source or stub
pure_py = False
for ext in [".py", ".pyi"]:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
p.parse(name, _read(path_ext))
if ext == ".py":
pure_py = True
if pure_py:
continue
logger.debug(f"loading extension module for fully documented:")
# Try to load module here
for ext in EXTENSION_SUFFIXES:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
if _load_module(name, path_ext, p):
break
else:
logger.warning(f"no module for {name} in this platform")
return p.compile()
```

focal_method_lines: `[78, 105]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "PEP561_SUFFIX"]`

type_context:

```python
from typing import Optional
from collections.abc import Sequence, Iterator
from sys import path as sys_path
from os import mkdir, walk
from os.path import isdir, isfile, abspath, join, sep, dirname
from importlib.abc import Loader
from importlib.machinery import EXTENSION_SUFFIXES
from importlib.util import find_spec, spec_from_file_location, module_from_spec
from .logger import logger
from .parser import parent, Parser
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
PEP561_SUFFIX = '-stubs'
def loader(root: str, pwd: str, link: bool, level: int, toc: bool) -> str:
"""Package searching algorithm."""
p = Parser.new(link, level, toc)
for name, path in walk_packages(root, pwd):
# Load its source or stub
pure_py = False
for ext in [".py", ".pyi"]:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
p.parse(name, _read(path_ext))
if ext == ".py":
pure_py = True
if pure_py:
continue
logger.debug(f"loading extension module for fully documented:")
# Try to load module here
for ext in EXTENSION_SUFFIXES:
path_ext = path + ext
if not isfile(path_ext):
continue
logger.debug(f"{name} <= {path_ext}")
if _load_module(name, path_ext, p):
break
else:
logger.warning(f"no module for {name} in this platform")
return p.compile()
```

has_branch: `true` · total_branches: `2`
**Row 2** · project: `apimd` · module: `apimd.parser` · method: `is_public_family`

focal_method_txt:

```python
def is_public_family(name: str) -> bool:
"""Check the name is come from public modules or not."""
for n in name.split('.'):
# Magic name
if is_magic(n):
continue
# Local or private name
if n.startswith('_'):
return False
return True
```

focal_method_lines: `[61, 70]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def is_public_family(name: str) -> bool:
"""Check the name is come from public modules or not."""
for n in name.split('.'):
# Magic name
if is_magic(n):
continue
# Local or private name
if n.startswith('_'):
return False
return True
```

has_branch: `true` · total_branches: `2`
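A few hedged examples of the expected behavior, inferred from the source above (`is_magic` is a helper defined elsewhere in `apimd.parser`):

```python
from apimd.parser import is_public_family

assert is_public_family("apimd.parser.Parser")        # every segment is public
assert is_public_family("apimd.parser.__init__")      # magic segments are skipped
assert not is_public_family("apimd._internal.thing")  # one private segment fails the whole name
```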
**Row 3** · project: `apimd` · module: `apimd.parser` · method: `walk_body`

focal_method_txt:

```python
def walk_body(body: Sequence[stmt]) -> Iterator[stmt]:
"""Traverse around body and its simple definition scope."""
for node in body:
if isinstance(node, If):
yield from walk_body(node.body)
yield from walk_body(node.orelse)
elif isinstance(node, Try):
yield from walk_body(node.body)
for h in node.handlers:
yield from walk_body(h.body)
yield from walk_body(node.orelse)
yield from walk_body(node.finalbody)
else:
yield node
```

focal_method_lines: `[73, 86]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def walk_body(body: Sequence[stmt]) -> Iterator[stmt]:
"""Traverse around body and its simple definition scope."""
for node in body:
if isinstance(node, If):
yield from walk_body(node.body)
yield from walk_body(node.orelse)
elif isinstance(node, Try):
yield from walk_body(node.body)
for h in node.handlers:
yield from walk_body(h.body)
yield from walk_body(node.orelse)
yield from walk_body(node.finalbody)
else:
yield node
```

has_branch: `true` · total_branches: `2`
**Row 4** · project: `apimd` · module: `apimd.parser` · method: `esc_underscore`

focal_method_txt:

```python
def esc_underscore(doc: str) -> str:
"""Escape underscore in names."""
if doc.count('_') > 1:
return doc.replace('_', r"\_")
else:
return doc
```

focal_method_lines: `[100, 105]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def esc_underscore(doc: str) -> str:
"""Escape underscore in names."""
if doc.count('_') > 1:
return doc.replace('_', r"\_")
else:
return doc
```

has_branch: `true` · total_branches: `2`
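Expected behavior, sketched: a single underscore is left alone, while two or more are escaped for Markdown.

```python
from apimd.parser import esc_underscore

esc_underscore("one_underscore")    # 'one_underscore' (unchanged)
esc_underscore("two_under_scores")  # 'two\\_under\\_scores'
```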
**Row 5** · project: `apimd` · module: `apimd.parser` · method: `doctest`

focal_method_txt:

```python
def doctest(doc: str) -> str:
"""Wrap doctest as markdown Python code."""
keep = False
docs = []
lines = doc.splitlines()
for i, line in enumerate(lines):
signed = line.startswith(">>> ")
if signed:
if not keep:
docs.append("```python")
keep = True
elif keep:
docs.append("```")
keep = False
docs.append(line)
if signed and i == len(lines) - 1:
docs.append("```")
keep = False
return '\n'.join(docs)
```

focal_method_lines: `[108, 126]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def doctest(doc: str) -> str:
"""Wrap doctest as markdown Python code."""
keep = False
docs = []
lines = doc.splitlines()
for i, line in enumerate(lines):
signed = line.startswith(">>> ")
if signed:
if not keep:
docs.append("```python")
keep = True
elif keep:
docs.append("```")
keep = False
docs.append(line)
if signed and i == len(lines) - 1:
docs.append("```")
keep = False
return '\n'.join(docs)
```

has_branch: `true` · total_branches: `2`
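A minimal sketch of the expected output; per the code above, only `>>> ` lines open a fenced block, and a doctest on the final line closes it.

```python
from apimd.parser import doctest

print(doctest(">>> 1 + 1"))
# ```python
# >>> 1 + 1
# ```
```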
**Row 6** · project: `apimd` · module: `apimd.parser` · method: `table`

focal_method_txt:

```python
def table(*titles: str, items: Iterable[Union[str, Iterable[str]]]) -> str:
"""Create multi-column table with the titles.
Usage:
>>> table('a', 'b', [['c', 'd'], ['e', 'f']])
| a | b |
|:---:|:---:|
| c | d |
| e | f |
"""
return '\n'.join([_table_cell(titles), _table_split(titles),
'\n'.join(_table_cell([n] if isinstance(n, str) else n)
for n in items)]) + '\n\n'
```

focal_method_lines: `[140, 150]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def table(*titles: str, items: Iterable[Union[str, Iterable[str]]]) -> str:
"""Create multi-column table with the titles.
Usage:
>>> table('a', 'b', [['c', 'd'], ['e', 'f']])
| a | b |
|:---:|:---:|
| c | d |
| e | f |
"""
return '\n'.join([_table_cell(titles), _table_split(titles),
'\n'.join(_table_cell([n] if isinstance(n, str) else n)
for n in items)]) + '\n\n'
```

has_branch: `false` · total_branches: `0`
**Row 7** · project: `apimd` · module: `apimd.parser` · method: `const_type`

focal_method_txt:

```python
def const_type(node: expr) -> str:
"""Constant type inference."""
if isinstance(node, Constant):
return _type_name(node.value)
elif isinstance(node, (Tuple, List, Set)):
return _type_name(node).lower() + _e_type(node.elts)
elif isinstance(node, Dict):
return 'dict' + _e_type(node.keys, node.values)
elif isinstance(node, Call) and isinstance(node.func, (Name, Attribute)):
func = unparse(node.func)
if func in chain({'bool', 'int', 'float', 'complex', 'str'},
PEP585.keys(), PEP585.values()):
return func
return ANY
```

focal_method_lines: `[181, 194]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
def const_type(node: expr) -> str:
"""Constant type inference."""
if isinstance(node, Constant):
return _type_name(node.value)
elif isinstance(node, (Tuple, List, Set)):
return _type_name(node).lower() + _e_type(node.elts)
elif isinstance(node, Dict):
return 'dict' + _e_type(node.keys, node.values)
elif isinstance(node, Call) and isinstance(node.func, (Name, Attribute)):
func = unparse(node.func)
if func in chain({'bool', 'int', 'float', 'complex', 'str'},
PEP585.keys(), PEP585.values()):
return func
return ANY
```

has_branch: `true` · total_branches: `2`
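A sketch of expected results; the exact element formatting comes from the private `_type_name` and `_e_type` helpers, which are not shown in this row, so the outputs below are educated guesses.

```python
import ast
from typing import cast
from apimd.parser import const_type

expr = cast(ast.Expr, ast.parse("'text'").body[0]).value
const_type(expr)  # 'str'

expr = cast(ast.Expr, ast.parse("(1, 'a')").body[0]).value
const_type(expr)  # likely 'tuple[int, str]'
```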
**Row 8** · project: `apimd` · module: `apimd.parser` · class: `Resolver` · method: `visit_Constant`

focal_method_txt:

```python
def visit_Constant(self, node: Constant) -> AST:
"""Check string is a name."""
if not isinstance(node.value, str):
return node
try:
e = cast(Expr, parse(node.value).body[0])
except SyntaxError:
return node
else:
return self.visit(e.value)
```

focal_method_lines: `[207, 216]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Constant(self, node: Constant) -> AST:
"""Check string is a name."""
if not isinstance(node.value, str):
return node
try:
e = cast(Expr, parse(node.value).body[0])
except SyntaxError:
return node
else:
return self.visit(e.value)
```

has_branch: `true` · total_branches: `2`
**Row 9** · project: `apimd` · module: `apimd.parser` · class: `Resolver` · method: `visit_Name`

focal_method_txt:

```python
def visit_Name(self, node: Name) -> AST:
"""Replace global names with its expression recursively."""
if node.id == self.self_ty:
return Name("Self", Load())
name = _m(self.root, node.id)
if name in self.alias and name not in self.alias[name]:
e = cast(Expr, parse(self.alias[name]).body[0])
# Support `TypeVar`
if isinstance(e.value, Call) and isinstance(e.value.func, Name):
func_name = e.value.func.id
idf = self.alias.get(_m(self.root, func_name), func_name)
if idf == 'typing.TypeVar':
return node
return self.visit(e.value)
else:
return node
```

focal_method_lines: `[218, 233]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Name(self, node: Name) -> AST:
"""Replace global names with its expression recursively."""
if node.id == self.self_ty:
return Name("Self", Load())
name = _m(self.root, node.id)
if name in self.alias and name not in self.alias[name]:
e = cast(Expr, parse(self.alias[name]).body[0])
# Support `TypeVar`
if isinstance(e.value, Call) and isinstance(e.value.func, Name):
func_name = e.value.func.id
idf = self.alias.get(_m(self.root, func_name), func_name)
if idf == 'typing.TypeVar':
return node
return self.visit(e.value)
else:
return node
```

has_branch: `true` · total_branches: `2`
**Row 10** · project: `apimd` · module: `apimd.parser` · class: `Resolver` · method: `visit_Subscript`

focal_method_txt:

```python
def visit_Subscript(self, node: Subscript) -> AST:
"""Implementation of PEP585 and PEP604."""
if not isinstance(node.value, Name):
return node
name = node.value.id
idf = self.alias.get(_m(self.root, name), name)
if idf == 'typing.Union':
if not isinstance(node.slice, Tuple):
return node.slice
b = node.slice.elts[0]
for e in node.slice.elts[1:]:
b = BinOp(b, BitOr(), e)
return b
elif idf == 'typing.Optional':
return BinOp(node.slice, BitOr(), Constant(None))
elif idf in PEP585:
logger.warning(f"{node.lineno}:{node.col_offset}: "
f"find deprecated name {idf}, "
f"recommended to use {PEP585[idf]}")
return Subscript(Name(PEP585[idf], Load), node.slice, node.ctx)
else:
return node
```

focal_method_lines: `[235, 256]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Subscript(self, node: Subscript) -> AST:
"""Implementation of PEP585 and PEP604."""
if not isinstance(node.value, Name):
return node
name = node.value.id
idf = self.alias.get(_m(self.root, name), name)
if idf == 'typing.Union':
if not isinstance(node.slice, Tuple):
return node.slice
b = node.slice.elts[0]
for e in node.slice.elts[1:]:
b = BinOp(b, BitOr(), e)
return b
elif idf == 'typing.Optional':
return BinOp(node.slice, BitOr(), Constant(None))
elif idf in PEP585:
logger.warning(f"{node.lineno}:{node.col_offset}: "
f"find deprecated name {idf}, "
f"recommended to use {PEP585[idf]}")
return Subscript(Name(PEP585[idf], Load), node.slice, node.ctx)
else:
return node
```

has_branch: `true` · total_branches: `2`
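A hedged sketch of the PEP 604 rewriting path. The alias-key format assumes the private `_m` helper joins `root` and the name with a dot; both the alias map and the root are illustrative.

```python
import ast
from apimd.parser import Resolver

tree = ast.parse("Union[int, str]", mode="eval")
alias = {"m.Union": "typing.Union"}  # assumes _m("m", "Union") == "m.Union"
resolved = Resolver("m", alias).visit(tree.body)
print(ast.unparse(resolved))  # int | str
```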
**Row 11** · project: `apimd` · module: `apimd.parser` · class: `Resolver` · method: `visit_Attribute`

focal_method_txt:

```python
def visit_Attribute(self, node: Attribute) -> AST:
"""Remove `typing.*` prefix of annotation."""
if not isinstance(node.value, Name):
return node
if node.value.id == 'typing':
return Name(node.attr, Load())
else:
return node
```

focal_method_lines: `[258, 265]` · in_stack: `false` · globals: `["__author__", "__copyright__", "__license__", "__email__", "_I", "_G", "_API", "ANY"]`

type_context:

```python
from typing import cast, TypeVar, Union, Optional
from types import ModuleType
from collections.abc import Sequence, Iterable, Iterator
from itertools import chain
from dataclasses import dataclass, field
from inspect import getdoc
from ast import (
parse, unparse, get_docstring, AST, FunctionDef, AsyncFunctionDef, ClassDef,
Assign, AnnAssign, Delete, Import, ImportFrom, Name, Expr, Subscript, BinOp,
BitOr, Call, If, Try, Tuple, List, Set, Dict, Constant, Load, Attribute,
arg, expr, stmt, arguments, NodeTransformer,
)
from .logger import logger
from .pep585 import PEP585
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2020-2021"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
_I = Union[Import, ImportFrom]
_G = Union[Assign, AnnAssign]
_API = Union[FunctionDef, AsyncFunctionDef, ClassDef]
ANY = 'Any'
class Resolver(NodeTransformer):
def __init__(self, root: str, alias: dict[str, str], self_ty: str = ""):
"""Set root module, alias and generic self name."""
super(Resolver, self).__init__()
self.root = root
self.alias = alias
self.self_ty = self_ty
def visit_Attribute(self, node: Attribute) -> AST:
"""Remove `typing.*` prefix of annotation."""
if not isinstance(node.value, Name):
return node
if node.value.id == 'typing':
return Name(node.attr, Load())
else:
return node
```

has_branch: `true` · total_branches: `2`
**Row 12** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `apply`

focal_method_txt:

```python
def apply(self, func: Callable[[List[float]], float], name: str) -> float:
"""Apply a function to the results of one named timer"""
if name in self._timings:
return func(self._timings[name])
raise KeyError(name)
```

focal_method_lines: `[41, 45]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def apply(self, func: Callable[[List[float]], float], name: str) -> float:
"""Apply a function to the results of one named timer"""
if name in self._timings:
return func(self._timings[name])
raise KeyError(name)
```

has_branch: `true` · total_branches: `2`
**Row 13** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `min`

focal_method_txt:

```python
def min(self, name: str) -> float:
"""Minimal value of timings"""
return self.apply(lambda values: min(values or [0]), name=name)
```

focal_method_lines: `[55, 57]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def min(self, name: str) -> float:
"""Minimal value of timings"""
return self.apply(lambda values: min(values or [0]), name=name)
```

has_branch: `false` · total_branches: `0`
**Row 14** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `max`

focal_method_txt:

```python
def max(self, name: str) -> float:
"""Maximal value of timings"""
return self.apply(lambda values: max(values or [0]), name=name)
```

focal_method_lines: `[59, 61]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def max(self, name: str) -> float:
"""Maximal value of timings"""
return self.apply(lambda values: max(values or [0]), name=name)
```

has_branch: `false` · total_branches: `0`
**Row 15** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `mean`

focal_method_txt:

```python
def mean(self, name: str) -> float:
"""Mean value of timings"""
return self.apply(lambda values: statistics.mean(values or [0]), name=name)
```

focal_method_lines: `[63, 65]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def mean(self, name: str) -> float:
"""Mean value of timings"""
return self.apply(lambda values: statistics.mean(values or [0]), name=name)
```

has_branch: `false` · total_branches: `0`
**Row 16** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `median`

focal_method_txt:

```python
def median(self, name: str) -> float:
"""Median value of timings"""
return self.apply(lambda values: statistics.median(values or [0]), name=name)
```

focal_method_lines: `[67, 69]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def median(self, name: str) -> float:
"""Median value of timings"""
return self.apply(lambda values: statistics.median(values or [0]), name=name)
```

has_branch: `false` · total_branches: `0`
**Row 17** · project: `codetiming` · module: `codetiming._timers` · class: `Timers` · method: `stdev`

focal_method_txt:

```python
def stdev(self, name: str) -> float:
"""Standard deviation of timings"""
if name in self._timings:
value = self._timings[name]
return statistics.stdev(value) if len(value) >= 2 else math.nan
raise KeyError(name)
```

focal_method_lines: `[71, 76]` · in_stack: `false` · globals: `[]`

type_context:

```python
import collections
import math
import statistics
from typing import TYPE_CHECKING, Any, Callable, Dict, List
class Timers(UserDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Add a private dictionary keeping track of all timings"""
super().__init__(*args, **kwargs)
self._timings: Dict[str, List[float]] = collections.defaultdict(list)
def stdev(self, name: str) -> float:
"""Standard deviation of timings"""
if name in self._timings:
value = self._timings[name]
return statistics.stdev(value) if len(value) >= 2 else math.nan
raise KeyError(name)
```

has_branch: `true` · total_branches: `2`
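A usage sketch tying rows 12–17 together. `_timings` is normally populated by `codetiming.Timer` instances; it is filled directly here purely for illustration.

```python
from codetiming._timers import Timers

t = Timers()
t._timings["fetch"] += [0.12, 0.18, 0.15]  # illustration only

print(t.min("fetch"), t.max("fetch"))  # 0.12 0.18
print(t.mean("fetch"))                 # 0.15
print(t.median("fetch"))               # 0.15
print(t.stdev("fetch"))                # sample standard deviation of the three runs
```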
**Row 18** · project: `cookiecutter` · module: `cookiecutter.find` · method: `find_template`

focal_method_txt:

```python
def find_template(repo_dir):
"""Determine which child directory of `repo_dir` is the project template.
:param repo_dir: Local directory of newly cloned repo.
:returns project_template: Relative path to project template.
"""
logger.debug('Searching %s for the project template.', repo_dir)
repo_dir_contents = os.listdir(repo_dir)
project_template = None
for item in repo_dir_contents:
if 'cookiecutter' in item and '{{' in item and '}}' in item:
project_template = item
break
if project_template:
project_template = os.path.join(repo_dir, project_template)
logger.debug('The project template appears to be %s', project_template)
return project_template
else:
raise NonTemplatedInputDirException
```

focal_method_lines: `[9, 30]` · in_stack: `false` · globals: `["logger"]`

type_context:

```python
import logging
import os
from cookiecutter.exceptions import NonTemplatedInputDirException
logger = logging.getLogger(__name__)
def find_template(repo_dir):
"""Determine which child directory of `repo_dir` is the project template.
:param repo_dir: Local directory of newly cloned repo.
:returns project_template: Relative path to project template.
"""
logger.debug('Searching %s for the project template.', repo_dir)
repo_dir_contents = os.listdir(repo_dir)
project_template = None
for item in repo_dir_contents:
if 'cookiecutter' in item and '{{' in item and '}}' in item:
project_template = item
break
if project_template:
project_template = os.path.join(repo_dir, project_template)
logger.debug('The project template appears to be %s', project_template)
return project_template
else:
raise NonTemplatedInputDirException
```

has_branch: `true` · total_branches: `2`
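A usage sketch with a hypothetical repository layout; the directory names are illustrative.

```python
from cookiecutter.find import find_template

# Given a checkout laid out as:
#   my-template/
#     cookiecutter.json
#     {{cookiecutter.project_slug}}/   <- contains 'cookiecutter', '{{' and '}}'
print(find_template("my-template"))
# my-template/{{cookiecutter.project_slug}}
```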
**Row 19** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `read_user_choice`

focal_method_txt:

```python
def read_user_choice(var_name, options):
"""Prompt the user to choose from several options for the given variable.
The first item will be returned if no input happens.
:param str var_name: Variable as specified in the context
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(options, list):
raise TypeError
if not options:
raise ValueError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(options, 1)
)
choices = choice_map.keys()
default = '1'
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select {}:'.format(var_name),
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choices)),
)
)
user_choice = click.prompt(
prompt, type=click.Choice(choices), default=default, show_choices=False
)
return choice_map[user_choice]
```

focal_method_lines: `[43, 77]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def read_user_choice(var_name, options):
"""Prompt the user to choose from several options for the given variable.
The first item will be returned if no input happens.
:param str var_name: Variable as specified in the context
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(options, list):
raise TypeError
if not options:
raise ValueError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(options, 1)
)
choices = choice_map.keys()
default = '1'
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select {}:'.format(var_name),
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choices)),
)
)
user_choice = click.prompt(
prompt, type=click.Choice(choices), default=default, show_choices=False
)
return choice_map[user_choice]
```

has_branch: `true` · total_branches: `2`
**Row 20** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `process_json`

focal_method_txt:

```python
def process_json(user_value):
"""Load user-supplied value as a JSON dict.
:param str user_value: User-supplied value to load as a JSON dict
"""
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception:
# Leave it up to click to ask the user again
raise click.UsageError('Unable to decode to JSON.')
if not isinstance(user_dict, dict):
# Leave it up to click to ask the user again
raise click.UsageError('Requires JSON dict.')
return user_dict
```

focal_method_lines: `[80, 95]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def process_json(user_value):
"""Load user-supplied value as a JSON dict.
:param str user_value: User-supplied value to load as a JSON dict
"""
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception:
# Leave it up to click to ask the user again
raise click.UsageError('Unable to decode to JSON.')
if not isinstance(user_dict, dict):
# Leave it up to click to ask the user again
raise click.UsageError('Requires JSON dict.')
return user_dict
```

has_branch: `true` · total_branches: `2`
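Expected behavior, sketched:

```python
from cookiecutter.prompt import process_json

process_json('{"a": 1, "b": [2, 3]}')  # OrderedDict([('a', 1), ('b', [2, 3])])
process_json('[1, 2]')                 # raises click.UsageError('Requires JSON dict.')
process_json('not json')               # raises click.UsageError('Unable to decode to JSON.')
```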
**Row 21** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `read_user_dict`

focal_method_txt:

```python
def read_user_dict(var_name, default_value):
"""Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
default_display = 'default'
user_value = click.prompt(
var_name, default=default_display, type=click.STRING, value_proc=process_json
)
if user_value == default_display:
# Return the given default w/o any processing
return default_value
return user_value
```

focal_method_lines: `[98, 118]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def read_user_dict(var_name, default_value):
"""Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
default_display = 'default'
user_value = click.prompt(
var_name, default=default_display, type=click.STRING, value_proc=process_json
)
if user_value == default_display:
# Return the given default w/o any processing
return default_value
return user_value
```

has_branch: `true` · total_branches: `2`
**Row 22** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `render_variable`

focal_method_txt:

```python
def render_variable(env, raw, cookiecutter_dict):
"""Render the next variable to be displayed in the user prompt.
Inside the prompting taken from the cookiecutter.json file, this renders
the next variable. For example, if a project_name is "Peanut Butter
Cookie", the repo_name could be be rendered with:
`{{ cookiecutter.project_name.replace(" ", "_") }}`.
This is then presented to the user as the default.
:param Environment env: A Jinja2 Environment object.
:param raw: The next value to be prompted for by the user.
:param dict cookiecutter_dict: The current context as it's gradually
being populated with variables.
:return: The rendered value for the default variable.
"""
if raw is None:
return None
elif isinstance(raw, dict):
return {
render_variable(env, k, cookiecutter_dict): render_variable(
env, v, cookiecutter_dict
)
for k, v in raw.items()
}
elif isinstance(raw, list):
return [render_variable(env, v, cookiecutter_dict) for v in raw]
elif not isinstance(raw, str):
raw = str(raw)
template = env.from_string(raw)
rendered_template = template.render(cookiecutter=cookiecutter_dict)
return rendered_template
```

focal_method_lines: `[121, 155]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def render_variable(env, raw, cookiecutter_dict):
"""Render the next variable to be displayed in the user prompt.
Inside the prompting taken from the cookiecutter.json file, this renders
the next variable. For example, if a project_name is "Peanut Butter
Cookie", the repo_name could be be rendered with:
`{{ cookiecutter.project_name.replace(" ", "_") }}`.
This is then presented to the user as the default.
:param Environment env: A Jinja2 Environment object.
:param raw: The next value to be prompted for by the user.
:param dict cookiecutter_dict: The current context as it's gradually
being populated with variables.
:return: The rendered value for the default variable.
"""
if raw is None:
return None
elif isinstance(raw, dict):
return {
render_variable(env, k, cookiecutter_dict): render_variable(
env, v, cookiecutter_dict
)
for k, v in raw.items()
}
elif isinstance(raw, list):
return [render_variable(env, v, cookiecutter_dict) for v in raw]
elif not isinstance(raw, str):
raw = str(raw)
template = env.from_string(raw)
rendered_template = template.render(cookiecutter=cookiecutter_dict)
return rendered_template
```

has_branch: `true` · total_branches: `2`
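A sketch of the docstring's own example, with an illustrative context dict:

```python
from cookiecutter.environment import StrictEnvironment
from cookiecutter.prompt import render_variable

ctx = {"project_name": "Peanut Butter Cookie"}
env = StrictEnvironment(context={"cookiecutter": ctx})
render_variable(env, '{{ cookiecutter.project_name.replace(" ", "_") }}', ctx)
# -> 'Peanut_Butter_Cookie'
```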
**Row 23** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `prompt_choice_for_config`

focal_method_txt:

```python
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
"""Prompt user with a set of options to choose from.
Each of the possible choices is rendered beforehand.
"""
rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]
if no_input:
return rendered_options[0]
return read_user_choice(key, rendered_options)
```

focal_method_lines: `[158, 167]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
"""Prompt user with a set of options to choose from.
Each of the possible choices is rendered beforehand.
"""
rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]
if no_input:
return rendered_options[0]
return read_user_choice(key, rendered_options)
```

has_branch: `true` · total_branches: `2`
**Row 24** · project: `cookiecutter` · module: `cookiecutter.prompt` · method: `prompt_for_config`

focal_method_txt:

```python
def prompt_for_config(context, no_input=False):
"""Prompt user to enter a new config.
:param dict context: Source for field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = OrderedDict([])
env = StrictEnvironment(context=context)
# First pass: Handle simple and raw variables, plus choices.
# These must be done first because the dictionaries keys and
# values might refer to them.
for key, raw in context['cookiecutter'].items():
if key.startswith('_') and not key.startswith('__'):
cookiecutter_dict[key] = raw
continue
elif key.startswith('__'):
cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
continue
try:
if isinstance(raw, list):
# We are dealing with a choice variable
val = prompt_choice_for_config(
cookiecutter_dict, env, key, raw, no_input
)
cookiecutter_dict[key] = val
elif not isinstance(raw, dict):
# We are dealing with a regular variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input:
val = read_user_variable(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
# Second pass; handle the dictionaries.
for key, raw in context['cookiecutter'].items():
        # Skip private type dicts not to be rendered.
if key.startswith('_') and not key.startswith('__'):
continue
try:
if isinstance(raw, dict):
# We are dealing with a dict variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input and not key.startswith('__'):
val = read_user_dict(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
return cookiecutter_dict
```

focal_method_lines: `[170, 228]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
from collections import OrderedDict
import click
from jinja2.exceptions import UndefinedError
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UndefinedVariableInTemplate
def prompt_for_config(context, no_input=False):
"""Prompt user to enter a new config.
:param dict context: Source for field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = OrderedDict([])
env = StrictEnvironment(context=context)
# First pass: Handle simple and raw variables, plus choices.
# These must be done first because the dictionaries keys and
# values might refer to them.
for key, raw in context['cookiecutter'].items():
if key.startswith('_') and not key.startswith('__'):
cookiecutter_dict[key] = raw
continue
elif key.startswith('__'):
cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
continue
try:
if isinstance(raw, list):
# We are dealing with a choice variable
val = prompt_choice_for_config(
cookiecutter_dict, env, key, raw, no_input
)
cookiecutter_dict[key] = val
elif not isinstance(raw, dict):
# We are dealing with a regular variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input:
val = read_user_variable(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
# Second pass; handle the dictionaries.
for key, raw in context['cookiecutter'].items():
        # Skip private type dicts not to be rendered.
if key.startswith('_') and not key.startswith('__'):
continue
try:
if isinstance(raw, dict):
# We are dealing with a dict variable
val = render_variable(env, raw, cookiecutter_dict)
if not no_input and not key.startswith('__'):
val = read_user_dict(key, val)
cookiecutter_dict[key] = val
except UndefinedError as err:
msg = "Unable to render variable '{}'".format(key)
raise UndefinedVariableInTemplate(msg, err, context)
return cookiecutter_dict
```

has_branch: `true` · total_branches: `2`
**Row 25** · project: `cookiecutter` · module: `cookiecutter.replay` · method: `get_file_name`

focal_method_txt:

```python
def get_file_name(replay_dir, template_name):
"""Get the name of file."""
suffix = '.json' if not template_name.endswith('.json') else ''
file_name = '{}{}'.format(template_name, suffix)
return os.path.join(replay_dir, file_name)
```

focal_method_lines: `[11, 15]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
import os
from cookiecutter.utils import make_sure_path_exists
def get_file_name(replay_dir, template_name):
"""Get the name of file."""
suffix = '.json' if not template_name.endswith('.json') else ''
file_name = '{}{}'.format(template_name, suffix)
return os.path.join(replay_dir, file_name)
```

has_branch: `false` · total_branches: `0`
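Expected behavior, sketched (the replay directory is illustrative):

```python
from cookiecutter.replay import get_file_name

get_file_name("/tmp/replay", "mytemplate")       # '/tmp/replay/mytemplate.json'
get_file_name("/tmp/replay", "mytemplate.json")  # same path; the suffix is not doubled
```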
**Row 26** · project: `cookiecutter` · module: `cookiecutter.replay` · method: `dump`

focal_method_txt:

```python
def dump(replay_dir, template_name, context):
"""Write json data to file."""
if not make_sure_path_exists(replay_dir):
raise IOError('Unable to create replay dir at {}'.format(replay_dir))
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
if not isinstance(context, dict):
raise TypeError('Context is required to be of type dict')
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'w') as outfile:
json.dump(context, outfile, indent=2)
```

focal_method_lines: `[18, 35]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
import os
from cookiecutter.utils import make_sure_path_exists
def dump(replay_dir, template_name, context):
"""Write json data to file."""
if not make_sure_path_exists(replay_dir):
raise IOError('Unable to create replay dir at {}'.format(replay_dir))
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
if not isinstance(context, dict):
raise TypeError('Context is required to be of type dict')
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'w') as outfile:
json.dump(context, outfile, indent=2)
```

has_branch: `true` · total_branches: `2`
**Row 27** · project: `cookiecutter` · module: `cookiecutter.replay` · method: `load`

focal_method_txt:

```python
def load(replay_dir, template_name):
"""Read json data from file."""
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'r') as infile:
context = json.load(infile)
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
return context
```

focal_method_lines: `[38, 51]` · in_stack: `false` · globals: `[]`

type_context:

```python
import json
import os
from cookiecutter.utils import make_sure_path_exists
def load(replay_dir, template_name):
"""Read json data from file."""
if not isinstance(template_name, str):
raise TypeError('Template name is required to be of type str')
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'r') as infile:
context = json.load(infile)
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
return context
```

has_branch: `true` · total_branches: `2`
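A dump/load round trip combining rows 26 and 27, as a sketch with illustrative paths:

```python
from cookiecutter.replay import dump, load

context = {"cookiecutter": {"project_name": "Demo"}}
dump("/tmp/replay", "demo-template", context)  # writes /tmp/replay/demo-template.json
assert load("/tmp/replay", "demo-template") == context
```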
**Row 28** · project: `cookiecutter` · module: `cookiecutter.repository` · method: `expand_abbreviations`

focal_method_txt:

```python
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
```

focal_method_lines: `[30, 45]` · in_stack: `false` · globals: `["REPO_REGEX"]`

type_context:

```python
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
```

has_branch: `true` · total_branches: `2`
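Expected behavior, sketched with a hypothetical abbreviation map:

```python
from cookiecutter.repository import expand_abbreviations

abbr = {"gh": "https://github.com/{0}.git"}  # illustrative mapping
expand_abbreviations("gh:audreyr/cookiecutter-pypackage", abbr)
# -> 'https://github.com/audreyr/cookiecutter-pypackage.git'
expand_abbreviations("./local-template", abbr)  # no match: returned unchanged
```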
**Row 29** · project: `cookiecutter` · module: `cookiecutter.repository` · method: `repository_has_cookiecutter_json`

focal_method_txt:

```python
def repository_has_cookiecutter_json(repo_directory):
"""Determine if `repo_directory` contains a `cookiecutter.json` file.
:param repo_directory: The candidate repository directory.
:return: True if the `repo_directory` is valid, else False.
"""
repo_directory_exists = os.path.isdir(repo_directory)
repo_config_exists = os.path.isfile(
os.path.join(repo_directory, 'cookiecutter.json')
)
return repo_directory_exists and repo_config_exists
```

focal_method_lines: `[48, 59]` · in_stack: `false` · globals: `["REPO_REGEX"]`

type_context:

```python
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def repository_has_cookiecutter_json(repo_directory):
"""Determine if `repo_directory` contains a `cookiecutter.json` file.
:param repo_directory: The candidate repository directory.
:return: True if the `repo_directory` is valid, else False.
"""
repo_directory_exists = os.path.isdir(repo_directory)
repo_config_exists = os.path.isfile(
os.path.join(repo_directory, 'cookiecutter.json')
)
return repo_directory_exists and repo_config_exists
```

has_branch: `false` · total_branches: `0`
**Row 30** · project: `cookiecutter` · module: `cookiecutter.repository` · method: `determine_repo_dir`

focal_method_txt:

```python
def determine_repo_dir(
template,
abbreviations,
clone_to_dir,
checkout,
no_input,
password=None,
directory=None,
):
"""
Locate the repository directory from a template reference.
Applies repository abbreviations to the template reference.
If the template refers to a repository URL, clone it.
If the template is a path to a local repository, use it.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param abbreviations: A dictionary of repository abbreviation
definitions.
:param clone_to_dir: The directory to clone the repository into.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param password: The password to use when extracting the repository.
:param directory: Directory within repo where cookiecutter.json lives.
:return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
after the template has been instantiated.
:raises: `RepositoryNotFound` if a repository directory could not be found.
"""
template = expand_abbreviations(template, abbreviations)
if is_zip_file(template):
unzipped_dir = unzip(
zip_uri=template,
is_url=is_repo_url(template),
clone_to_dir=clone_to_dir,
no_input=no_input,
password=password,
)
repository_candidates = [unzipped_dir]
cleanup = True
elif is_repo_url(template):
cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
repository_candidates = [cloned_repo]
cleanup = False
else:
repository_candidates = [template, os.path.join(clone_to_dir, template)]
cleanup = False
if directory:
repository_candidates = [
os.path.join(s, directory) for s in repository_candidates
]
for repo_candidate in repository_candidates:
if repository_has_cookiecutter_json(repo_candidate):
return repo_candidate, cleanup
raise RepositoryNotFound(
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(template, '\n'.join(repository_candidates))
)
```

focal_method_lines: `[62, 126]` · in_stack: `false` · globals: `["REPO_REGEX"]`

type_context:

```python
import os
import re
from cookiecutter.exceptions import RepositoryNotFound
from cookiecutter.vcs import clone
from cookiecutter.zipfile import unzip
REPO_REGEX = re.compile(
r"""
# something like git:// ssh:// file:// etc.
((((git|hg)\+)?(git|ssh|file|https?):(//)?)
| # or
(\w+@[\w\.]+) # something like user@...
)
""",
re.VERBOSE,
)
def determine_repo_dir(
template,
abbreviations,
clone_to_dir,
checkout,
no_input,
password=None,
directory=None,
):
"""
Locate the repository directory from a template reference.
Applies repository abbreviations to the template reference.
If the template refers to a repository URL, clone it.
If the template is a path to a local repository, use it.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param abbreviations: A dictionary of repository abbreviation
definitions.
:param clone_to_dir: The directory to clone the repository into.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param password: The password to use when extracting the repository.
:param directory: Directory within repo where cookiecutter.json lives.
:return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
after the template has been instantiated.
:raises: `RepositoryNotFound` if a repository directory could not be found.
"""
template = expand_abbreviations(template, abbreviations)
if is_zip_file(template):
unzipped_dir = unzip(
zip_uri=template,
is_url=is_repo_url(template),
clone_to_dir=clone_to_dir,
no_input=no_input,
password=password,
)
repository_candidates = [unzipped_dir]
cleanup = True
elif is_repo_url(template):
cloned_repo = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=clone_to_dir,
no_input=no_input,
)
repository_candidates = [cloned_repo]
cleanup = False
else:
repository_candidates = [template, os.path.join(clone_to_dir, template)]
cleanup = False
if directory:
repository_candidates = [
os.path.join(s, directory) for s in repository_candidates
]
for repo_candidate in repository_candidates:
if repository_has_cookiecutter_json(repo_candidate):
return repo_candidate, cleanup
raise RepositoryNotFound(
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(template, '\n'.join(repository_candidates))
)
```

has_branch: `true` · total_branches: `2`
**Row 31** · project: `cookiecutter` · module: `cookiecutter.zipfile` · method: `unzip`

focal_method_txt:

```python
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
"""Download and unpack a zipfile at a given URI.
This will download the zipfile to the cookiecutter repository,
and unpack into a temporary directory.
:param zip_uri: The URI for the zipfile.
:param is_url: Is the zip URI a URL or a file?
:param clone_to_dir: The cookiecutter repository directory
to put the archive into.
:param no_input: Suppress any prompts
:param password: The password to use when unpacking the repository.
"""
# Ensure that clone_to_dir exists
clone_to_dir = os.path.expanduser(clone_to_dir)
make_sure_path_exists(clone_to_dir)
if is_url:
# Build the name of the cached zipfile,
# and prompt to delete if it already exists.
identifier = zip_uri.rsplit('/', 1)[1]
zip_path = os.path.join(clone_to_dir, identifier)
if os.path.exists(zip_path):
download = prompt_and_delete(zip_path, no_input=no_input)
else:
download = True
if download:
# (Re) download the zipfile
r = requests.get(zip_uri, stream=True)
with open(zip_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
# Just use the local zipfile as-is.
zip_path = os.path.abspath(zip_uri)
# Now unpack the repository. The zipfile will be unpacked
# into a temporary directory
try:
zip_file = ZipFile(zip_path)
if len(zip_file.namelist()) == 0:
raise InvalidZipRepository('Zip repository {} is empty'.format(zip_uri))
# The first record in the zipfile should be the directory entry for
# the archive. If it isn't a directory, there's a problem.
first_filename = zip_file.namelist()[0]
if not first_filename.endswith('/'):
raise InvalidZipRepository(
'Zip repository {} does not include '
'a top-level directory'.format(zip_uri)
)
# Construct the final target directory
project_name = first_filename[:-1]
unzip_base = tempfile.mkdtemp()
unzip_path = os.path.join(unzip_base, project_name)
# Extract the zip file into the temporary directory
try:
zip_file.extractall(path=unzip_base)
except RuntimeError:
# File is password protected; try to get a password from the
# environment; if that doesn't work, ask the user.
if password is not None:
try:
zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8'))
except RuntimeError:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
elif no_input:
raise InvalidZipRepository(
'Unable to unlock password protected repository'
)
else:
retry = 0
while retry is not None:
try:
password = read_repo_password('Repo password')
zip_file.extractall(
path=unzip_base, pwd=password.encode('utf-8')
)
retry = None
except RuntimeError:
retry += 1
if retry == 3:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
except BadZipFile:
raise InvalidZipRepository(
'Zip repository {} is not a valid zip archive:'.format(zip_uri)
)
return unzip_path
```

focal_method_lines: `[12, 111]` · in_stack: `false` · globals: `[]`

type_context:

```python
import os
import tempfile
from zipfile import BadZipFile, ZipFile
import requests
from cookiecutter.exceptions import InvalidZipRepository
from cookiecutter.prompt import read_repo_password
from cookiecutter.utils import make_sure_path_exists, prompt_and_delete
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
"""Download and unpack a zipfile at a given URI.
This will download the zipfile to the cookiecutter repository,
and unpack into a temporary directory.
:param zip_uri: The URI for the zipfile.
:param is_url: Is the zip URI a URL or a file?
:param clone_to_dir: The cookiecutter repository directory
to put the archive into.
:param no_input: Suppress any prompts
:param password: The password to use when unpacking the repository.
"""
# Ensure that clone_to_dir exists
clone_to_dir = os.path.expanduser(clone_to_dir)
make_sure_path_exists(clone_to_dir)
if is_url:
# Build the name of the cached zipfile,
# and prompt to delete if it already exists.
identifier = zip_uri.rsplit('/', 1)[1]
zip_path = os.path.join(clone_to_dir, identifier)
if os.path.exists(zip_path):
download = prompt_and_delete(zip_path, no_input=no_input)
else:
download = True
if download:
# (Re) download the zipfile
r = requests.get(zip_uri, stream=True)
with open(zip_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
# Just use the local zipfile as-is.
zip_path = os.path.abspath(zip_uri)
# Now unpack the repository. The zipfile will be unpacked
# into a temporary directory
try:
zip_file = ZipFile(zip_path)
if len(zip_file.namelist()) == 0:
raise InvalidZipRepository('Zip repository {} is empty'.format(zip_uri))
# The first record in the zipfile should be the directory entry for
# the archive. If it isn't a directory, there's a problem.
first_filename = zip_file.namelist()[0]
if not first_filename.endswith('/'):
raise InvalidZipRepository(
'Zip repository {} does not include '
'a top-level directory'.format(zip_uri)
)
# Construct the final target directory
project_name = first_filename[:-1]
unzip_base = tempfile.mkdtemp()
unzip_path = os.path.join(unzip_base, project_name)
# Extract the zip file into the temporary directory
try:
zip_file.extractall(path=unzip_base)
except RuntimeError:
# File is password protected; try to get a password from the
# environment; if that doesn't work, ask the user.
if password is not None:
try:
zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8'))
except RuntimeError:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
elif no_input:
raise InvalidZipRepository(
'Unable to unlock password protected repository'
)
else:
retry = 0
while retry is not None:
try:
password = read_repo_password('Repo password')
zip_file.extractall(
path=unzip_base, pwd=password.encode('utf-8')
)
retry = None
except RuntimeError:
retry += 1
if retry == 3:
raise InvalidZipRepository(
'Invalid password provided for protected repository'
)
except BadZipFile:
raise InvalidZipRepository(
'Zip repository {} is not a valid zip archive:'.format(zip_uri)
)
return unzip_path
| true | 2 |
|
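A minimal usage sketch for the unzip() record above — illustrative only, assuming the function lives at cookiecutter.zipfile (the record header with the module path is truncated) and using a hypothetical local archive path:

from cookiecutter.zipfile import unzip

# Unpack a local template archive without prompting; returns the path to the
# unpacked top-level template directory inside a fresh temporary directory.
template_dir = unzip('template.zip', is_url=False, no_input=True)
print(template_dir)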
32 |
dataclasses_json
|
dataclasses_json.cfg
|
config
|
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[str, T], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
|
[
43,
96
] | false |
[
"T",
"global_config"
] |
import functools
from typing import Callable, Dict, Optional, TypeVar, Union
from marshmallow.fields import Field as MarshmallowField
from dataclasses_json.undefined import Undefined, UndefinedParameterError
T = TypeVar("T")
global_config = _GlobalConfig()
def config(metadata: dict = None, *,
# TODO: these can be typed more precisely
# Specifically, a Callable[A, B], where `B` is bound as a JSON type
encoder: Callable = None,
decoder: Callable = None,
mm_field: MarshmallowField = None,
letter_case: Callable[[str], str] = None,
undefined: Optional[Union[str, Undefined]] = None,
field_name: str = None,
exclude: Optional[Callable[[str, T], bool]] = None,
) -> Dict[str, dict]:
if metadata is None:
metadata = {}
lib_metadata = metadata.setdefault('dataclasses_json', {})
if encoder is not None:
lib_metadata['encoder'] = encoder
if decoder is not None:
lib_metadata['decoder'] = decoder
if mm_field is not None:
lib_metadata['mm_field'] = mm_field
if field_name is not None:
if letter_case is not None:
@functools.wraps(letter_case)
def override(_, _letter_case=letter_case, _field_name=field_name):
return _letter_case(_field_name)
else:
def override(_, _field_name=field_name):
return _field_name
letter_case = override
if letter_case is not None:
lib_metadata['letter_case'] = letter_case
if undefined is not None:
# Get the corresponding action for undefined parameters
if isinstance(undefined, str):
if not hasattr(Undefined, undefined.upper()):
valid_actions = list(action.name for action in Undefined)
raise UndefinedParameterError(
f"Invalid undefined parameter action, "
f"must be one of {valid_actions}")
undefined = Undefined[undefined.upper()]
lib_metadata['undefined'] = undefined
if exclude is not None:
lib_metadata['exclude'] = exclude
return metadata
| true | 2 |
|
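To show what config() produces in practice, a small sketch through the public dataclasses_json API (Person and fullName are invented names for illustration):

from dataclasses import dataclass, field
from dataclasses_json import dataclass_json, config

@dataclass_json
@dataclass
class Person:
    # field_name override: serialize `name` under the JSON key "fullName"
    name: str = field(metadata=config(field_name="fullName"))

print(Person("Ada").to_json())  # {"fullName": "Ada"}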
33 |
dataclasses_json
|
dataclasses_json.core
|
_ExtendedEncoder
|
default
|
def default(self, o) -> Json:
result: Json
if _isinstance_safe(o, Collection):
if _isinstance_safe(o, Mapping):
result = dict(o)
else:
result = list(o)
elif _isinstance_safe(o, datetime):
result = o.timestamp()
elif _isinstance_safe(o, UUID):
result = str(o)
elif _isinstance_safe(o, Enum):
result = o.value
elif _isinstance_safe(o, Decimal):
result = str(o)
else:
result = json.JSONEncoder.default(self, o)
return result
|
[
32,
49
] | false |
[
"Json",
"confs",
"FieldOverride"
] |
import copy
import json
import warnings
from collections import defaultdict, namedtuple
from dataclasses import (MISSING,
_is_dataclass_instance,
fields,
is_dataclass # type: ignore
)
from datetime import datetime, timezone
from decimal import Decimal
from enum import Enum
from typing import Any, Collection, Mapping, Union, get_type_hints
from uuid import UUID
from typing_inspect import is_union_type
from dataclasses_json import cfg
from dataclasses_json.utils import (_get_type_cons,
_handle_undefined_parameters_safe,
_is_collection, _is_mapping, _is_new_type,
_is_optional, _isinstance_safe,
_issubclass_safe)
Json = Union[dict, list, str, int, float, bool, None]
confs = ['encoder', 'decoder', 'mm_field', 'letter_case', 'exclude']
FieldOverride = namedtuple('FieldOverride', confs)
class _ExtendedEncoder(json.JSONEncoder):
def default(self, o) -> Json:
result: Json
if _isinstance_safe(o, Collection):
if _isinstance_safe(o, Mapping):
result = dict(o)
else:
result = list(o)
elif _isinstance_safe(o, datetime):
result = o.timestamp()
elif _isinstance_safe(o, UUID):
result = str(o)
elif _isinstance_safe(o, Enum):
result = o.value
elif _isinstance_safe(o, Decimal):
result = str(o)
else:
result = json.JSONEncoder.default(self, o)
return result
| true | 2 |
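_ExtendedEncoder is a private helper, but its effect is easy to demonstrate by passing it as the cls of json.dumps — a sketch, with the import path mirroring the record above:

import json
import uuid
from datetime import datetime, timezone
from dataclasses_json.core import _ExtendedEncoder  # private API

payload = {"id": uuid.uuid4(), "when": datetime.now(timezone.utc)}
# Per the branches above: UUID is rendered as a string, datetime as a timestamp
print(json.dumps(payload, cls=_ExtendedEncoder))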
34 |
dataclasses_json
|
dataclasses_json.mm
|
build_type
|
def build_type(type_, options, mixin, field, cls):
def inner(type_, options):
while True:
if not _is_new_type(type_):
break
type_ = type_.__supertype__
if is_dataclass(type_):
if _issubclass_safe(type_, mixin):
options['field_many'] = bool(
_is_supported_generic(field.type) and _is_collection(
field.type))
return fields.Nested(type_.schema(), **options)
else:
warnings.warn(f"Nested dataclass field {field.name} of type "
f"{field.type} detected in "
f"{cls.__name__} that is not an instance of "
f"dataclass_json. Did you mean to recursively "
f"serialize this field? If so, make sure to "
f"augment {type_} with either the "
f"`dataclass_json` decorator or mixin.")
return fields.Field(**options)
origin = getattr(type_, '__origin__', type_)
args = [inner(a, {}) for a in getattr(type_, '__args__', []) if
a is not type(None)]
if _is_optional(type_):
options["allow_none"] = True
if origin in TYPES:
return TYPES[origin](*args, **options)
if _issubclass_safe(origin, Enum):
return EnumField(enum=origin, by_value=True, *args, **options)
if is_union_type(type_):
union_types = [a for a in getattr(type_, '__args__', []) if
a is not type(None)]
union_desc = dict(zip(union_types, args))
return _UnionField(union_desc, cls, field, **options)
warnings.warn(
f"Unknown type {type_} at {cls.__name__}.{field.name}: {field.type} "
f"It's advised to pass the correct marshmallow type to `mm_field`.")
return fields.Field(**options)
return inner(type_, options)
|
[
226,
274
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
class _UnionField(fields.Field):
def __init__(self, desc, cls, field, *args, **kwargs):
self.desc = desc
self.cls = cls
self.field = field
super().__init__(*args, **kwargs)
def build_type(type_, options, mixin, field, cls):
def inner(type_, options):
while True:
if not _is_new_type(type_):
break
type_ = type_.__supertype__
if is_dataclass(type_):
if _issubclass_safe(type_, mixin):
options['field_many'] = bool(
_is_supported_generic(field.type) and _is_collection(
field.type))
return fields.Nested(type_.schema(), **options)
else:
warnings.warn(f"Nested dataclass field {field.name} of type "
f"{field.type} detected in "
f"{cls.__name__} that is not an instance of "
f"dataclass_json. Did you mean to recursively "
f"serialize this field? If so, make sure to "
f"augment {type_} with either the "
f"`dataclass_json` decorator or mixin.")
return fields.Field(**options)
origin = getattr(type_, '__origin__', type_)
args = [inner(a, {}) for a in getattr(type_, '__args__', []) if
a is not type(None)]
if _is_optional(type_):
options["allow_none"] = True
if origin in TYPES:
return TYPES[origin](*args, **options)
if _issubclass_safe(origin, Enum):
return EnumField(enum=origin, by_value=True, *args, **options)
if is_union_type(type_):
union_types = [a for a in getattr(type_, '__args__', []) if
a is not type(None)]
union_desc = dict(zip(union_types, args))
return _UnionField(union_desc, cls, field, **options)
warnings.warn(
f"Unknown type {type_} at {cls.__name__}.{field.name}: {field.type} "
f"It's advised to pass the correct marshmallow type to `mm_field`.")
return fields.Field(**options)
return inner(type_, options)
| true | 2 |
|
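The core move inside inner() above is decomposing a typing annotation into its origin and its non-None args before dispatching on TYPES; a standalone sketch of that decomposition:

import typing

t = typing.Optional[typing.List[int]]
origin = getattr(t, '__origin__', t)   # typing.Union (Optional[X] is Union[X, None])
args = [a for a in getattr(t, '__args__', ()) if a is not type(None)]
print(origin, args)                    # typing.Union [typing.List[int]]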
35 |
dataclasses_json
|
dataclasses_json.mm
|
schema
|
def schema(cls, mixin, infer_missing):
schema = {}
overrides = _user_overrides_or_exts(cls)
# TODO check the undefined parameters and add the proper schema action
# https://marshmallow.readthedocs.io/en/stable/quickstart.html
for field in dc_fields(cls):
metadata = (field.metadata or {}).get('dataclasses_json', {})
metadata = overrides[field.name]
if metadata.mm_field is not None:
schema[field.name] = metadata.mm_field
else:
type_ = field.type
options = {}
missing_key = 'missing' if infer_missing else 'default'
if field.default is not MISSING:
options[missing_key] = field.default
elif field.default_factory is not MISSING:
options[missing_key] = field.default_factory
if options.get(missing_key, ...) is None:
options['allow_none'] = True
if _is_optional(type_):
options.setdefault(missing_key, None)
options['allow_none'] = True
if len(type_.__args__) == 2:
# Union[str, int, None] is optional too, but it has more than 1 typed field.
type_ = type_.__args__[0]
if metadata.letter_case is not None:
options['data_key'] = metadata.letter_case(field.name)
t = build_type(type_, options, mixin, field, cls)
# if type(t) is not fields.Field: # If we use `isinstance` we would return nothing.
if field.type != typing.Optional[CatchAllVar]:
schema[field.name] = t
return schema
|
[
277,
314
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
def schema(cls, mixin, infer_missing):
schema = {}
overrides = _user_overrides_or_exts(cls)
# TODO check the undefined parameters and add the proper schema action
# https://marshmallow.readthedocs.io/en/stable/quickstart.html
for field in dc_fields(cls):
metadata = (field.metadata or {}).get('dataclasses_json', {})
metadata = overrides[field.name]
if metadata.mm_field is not None:
schema[field.name] = metadata.mm_field
else:
type_ = field.type
options = {}
missing_key = 'missing' if infer_missing else 'default'
if field.default is not MISSING:
options[missing_key] = field.default
elif field.default_factory is not MISSING:
options[missing_key] = field.default_factory
if options.get(missing_key, ...) is None:
options['allow_none'] = True
if _is_optional(type_):
options.setdefault(missing_key, None)
options['allow_none'] = True
if len(type_.__args__) == 2:
# Union[str, int, None] is optional too, but it has more than 1 typed field.
type_ = type_.__args__[0]
if metadata.letter_case is not None:
options['data_key'] = metadata.letter_case(field.name)
t = build_type(type_, options, mixin, field, cls)
# if type(t) is not fields.Field: # If we use `isinstance` we would return nothing.
if field.type != typing.Optional[CatchAllVar]:
schema[field.name] = t
return schema
| true | 2 |
|
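The `len(type_.__args__) == 2` check above relies on Optional[X] being represented as Union[X, None]; a quick demonstration:

import typing

t = typing.Optional[int]
print(t.__args__)     # (<class 'int'>, <class 'NoneType'>)
print(t.__args__[0])  # <class 'int'> – the branch above unwraps Optional to this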
36 |
dataclasses_json
|
dataclasses_json.mm
|
build_schema
|
def build_schema(cls: typing.Type[A],
mixin,
infer_missing,
partial) -> typing.Type[SchemaType]:
Meta = type('Meta',
(),
{'fields': tuple(field.name for field in dc_fields(cls)
if
field.name != 'dataclass_json_config' and field.type !=
typing.Optional[CatchAllVar]),
# TODO #180
# 'render_module': global_config.json_module
})
@post_load
def make_instance(self, kvs, **kwargs):
return _decode_dataclass(cls, kvs, partial)
def dumps(self, *args, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _ExtendedEncoder
return Schema.dumps(self, *args, **kwargs)
def dump(self, obj, *, many=None):
dumped = Schema.dump(self, obj, many=many)
# TODO This is hacky, but the other option I can think of is to generate a different schema
# depending on dump and load, which is even more hacky
# The only problem is the catch all field, we can't statically create a schema for it
# so we just update the dumped dict
if many:
for i, _obj in enumerate(obj):
dumped[i].update(
_handle_undefined_parameters_safe(cls=_obj, kvs={},
usage="dump"))
else:
dumped.update(_handle_undefined_parameters_safe(cls=obj, kvs={},
usage="dump"))
return dumped
schema_ = schema(cls, mixin, infer_missing)
DataClassSchema: typing.Type[SchemaType] = type(
f'{cls.__name__.capitalize()}Schema',
(Schema,),
{'Meta': Meta,
f'make_{cls.__name__.lower()}': make_instance,
'dumps': dumps,
'dump': dump,
**schema_})
return DataClassSchema
|
[
317,
368
] | false |
[
"TYPES",
"A",
"JsonData",
"TEncoded",
"TOneOrMulti",
"TOneOrMultiEncoded"
] |
import typing
import warnings
import sys
from copy import deepcopy
from dataclasses import MISSING, is_dataclass, fields as dc_fields
from datetime import datetime
from decimal import Decimal
from uuid import UUID
from enum import Enum
from typing_inspect import is_union_type
from marshmallow import fields, Schema, post_load
from marshmallow_enum import EnumField
from marshmallow.exceptions import ValidationError
from dataclasses_json.core import (_is_supported_generic, _decode_dataclass,
_ExtendedEncoder, _user_overrides_or_exts)
from dataclasses_json.utils import (_is_collection, _is_optional,
_issubclass_safe, _timestamp_to_dt_aware,
_is_new_type, _get_type_origin,
_handle_undefined_parameters_safe,
CatchAllVar)
TYPES = {
typing.Mapping: fields.Mapping,
typing.MutableMapping: fields.Mapping,
typing.List: fields.List,
typing.Dict: fields.Dict,
typing.Tuple: fields.Tuple,
typing.Callable: fields.Function,
typing.Any: fields.Raw,
dict: fields.Dict,
list: fields.List,
str: fields.Str,
int: fields.Int,
float: fields.Float,
bool: fields.Bool,
datetime: _TimestampField,
UUID: fields.UUID,
Decimal: fields.Decimal,
CatchAllVar: fields.Dict,
}
A = typing.TypeVar('A')
JsonData = typing.Union[str, bytes, bytearray]
TEncoded = typing.Dict[str, typing.Any]
TOneOrMulti = typing.Union[typing.List[A], A]
TOneOrMultiEncoded = typing.Union[typing.List[TEncoded], TEncoded]
def build_schema(cls: typing.Type[A],
mixin,
infer_missing,
partial) -> typing.Type[SchemaType]:
Meta = type('Meta',
(),
{'fields': tuple(field.name for field in dc_fields(cls)
if
field.name != 'dataclass_json_config' and field.type !=
typing.Optional[CatchAllVar]),
# TODO #180
# 'render_module': global_config.json_module
})
@post_load
def make_instance(self, kvs, **kwargs):
return _decode_dataclass(cls, kvs, partial)
def dumps(self, *args, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _ExtendedEncoder
return Schema.dumps(self, *args, **kwargs)
def dump(self, obj, *, many=None):
dumped = Schema.dump(self, obj, many=many)
# TODO This is hacky, but the other option I can think of is to generate a different schema
# depending on dump and load, which is even more hacky
# The only problem is the catch all field, we can't statically create a schema for it
# so we just update the dumped dict
if many:
for i, _obj in enumerate(obj):
dumped[i].update(
_handle_undefined_parameters_safe(cls=_obj, kvs={},
usage="dump"))
else:
dumped.update(_handle_undefined_parameters_safe(cls=obj, kvs={},
usage="dump"))
return dumped
schema_ = schema(cls, mixin, infer_missing)
DataClassSchema: typing.Type[SchemaType] = type(
f'{cls.__name__.capitalize()}Schema',
(Schema,),
{'Meta': Meta,
f'make_{cls.__name__.lower()}': make_instance,
'dumps': dumps,
'dump': dump,
**schema_})
return DataClassSchema
| true | 2 |
|
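build_schema() is what backs the .schema() classmethod generated for decorated dataclasses; a round-trip sketch through the public API (Point is an invented example class):

from dataclasses import dataclass
from dataclasses_json import dataclass_json

@dataclass_json
@dataclass
class Point:
    x: int
    y: int

schema = Point.schema()                  # a PointSchema built as above
print(schema.loads('{"x": 1, "y": 2}'))  # Point(x=1, y=2), via make_instance
print(schema.dumps(Point(3, 4)))         # {"x": 3, "y": 4}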
37 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_from_dict
|
@staticmethod
@abc.abstractmethod
def handle_from_dict(cls, kvs: Dict[Any, Any]) -> Dict[str, Any]:
"""
Return the parameters to initialize the class with.
"""
pass
|
[
19,
23
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
@abc.abstractmethod
def handle_from_dict(cls, kvs: Dict[Any, Any]) -> Dict[str, Any]:
"""
Return the parameters to initialize the class with.
"""
pass
| false | 0 |
38 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_to_dict
|
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Return the parameters that will be written to the output dict
"""
return kvs
|
[
26,
30
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Return the parameters that will be written to the output dict
"""
return kvs
| false | 0 |
39 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
handle_dump
|
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
"""
Return the parameters that will be added to the schema dump.
"""
return {}
|
[
33,
37
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
"""
Return the parameters that will be added to the schema dump.
"""
return {}
| false | 0 |
40 |
dataclasses_json
|
dataclasses_json.undefined
|
_UndefinedParameterAction
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
return obj.__init__
|
[
40,
41
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _UndefinedParameterAction(abc.ABC):
@staticmethod
def create_init(obj) -> Callable:
return obj.__init__
| false | 0 |
41 |
dataclasses_json
|
dataclasses_json.undefined
|
_RaiseUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
if len(unknown) > 0:
raise UndefinedParameterError(
f"Received undefined initialization arguments {unknown}")
return known
|
[
65,
72
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _RaiseUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
if len(unknown) > 0:
raise UndefinedParameterError(
f"Received undefined initialization arguments {unknown}")
return known
| true | 2 |
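_RaiseUndefinedParameters is the action selected by Undefined.RAISE; a sketch of the observable behavior (Strict is an invented class):

from dataclasses import dataclass
from dataclasses_json import dataclass_json, Undefined
from dataclasses_json.undefined import UndefinedParameterError

@dataclass_json(undefined=Undefined.RAISE)
@dataclass
class Strict:
    a: int

try:
    Strict.from_dict({"a": 1, "b": 2})
except UndefinedParameterError as exc:
    print(exc)  # Received undefined initialization arguments {'b': 2}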
42 |
dataclasses_json
|
dataclasses_json.undefined
|
_IgnoreUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known_given_parameters, _ = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
return known_given_parameters
|
[
86,
90
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _IgnoreUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known_given_parameters, _ = \
_UndefinedParameterAction._separate_defined_undefined_kvs(
cls=cls, kvs=kvs)
return known_given_parameters
| false | 0 |
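_IgnoreUndefinedParameters backs Undefined.EXCLUDE: unknown keys are silently dropped rather than raised on (Lenient is an invented class):

from dataclasses import dataclass
from dataclasses_json import dataclass_json, Undefined

@dataclass_json(undefined=Undefined.EXCLUDE)
@dataclass
class Lenient:
    a: int

print(Lenient.from_dict({"a": 1, "b": 2}))  # Lenient(a=1) – 'b' is discarded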
43 |
dataclasses_json
|
dataclasses_json.undefined
|
_IgnoreUndefinedParameters
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _ignore_init(self, *args, **kwargs):
known_kwargs, _ = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
num_args_takeable = num_params_takeable - len(known_kwargs)
args = args[:num_args_takeable]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
bound_parameters.apply_defaults()
arguments = bound_parameters.arguments
arguments.pop("self", None)
final_parameters = \
_IgnoreUndefinedParameters.handle_from_dict(obj, arguments)
original_init(self, **final_parameters)
return _ignore_init
|
[
93,
117
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _IgnoreUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _ignore_init(self, *args, **kwargs):
known_kwargs, _ = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
num_args_takeable = num_params_takeable - len(known_kwargs)
args = args[:num_args_takeable]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
bound_parameters.apply_defaults()
arguments = bound_parameters.arguments
arguments.pop("self", None)
final_parameters = \
_IgnoreUndefinedParameters.handle_from_dict(obj, arguments)
original_init(self, **final_parameters)
return _ignore_init
| false | 0 |
44 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_from_dict
|
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = _UndefinedParameterAction \
._separate_defined_undefined_kvs(cls=cls, kvs=kvs)
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=cls)
if catch_all_field.name in known:
already_parsed = isinstance(known[catch_all_field.name], dict)
default_value = _CatchAllUndefinedParameters._get_default(
catch_all_field=catch_all_field)
received_default = default_value == known[catch_all_field.name]
value_to_write: Any
if received_default and len(unknown) == 0:
value_to_write = default_value
elif received_default and len(unknown) > 0:
value_to_write = unknown
elif already_parsed:
# Did not receive default
value_to_write = known[catch_all_field.name]
if len(unknown) > 0:
value_to_write.update(unknown)
else:
error_message = f"Received input field with " \
f"same name as catch-all field: " \
f"'{catch_all_field.name}': " \
f"'{known[catch_all_field.name]}'"
raise UndefinedParameterError(error_message)
else:
value_to_write = unknown
known[catch_all_field.name] = value_to_write
return known
|
[
133,
166
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_from_dict(cls, kvs: Dict) -> Dict[str, Any]:
known, unknown = _UndefinedParameterAction \
._separate_defined_undefined_kvs(cls=cls, kvs=kvs)
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=cls)
if catch_all_field.name in known:
already_parsed = isinstance(known[catch_all_field.name], dict)
default_value = _CatchAllUndefinedParameters._get_default(
catch_all_field=catch_all_field)
received_default = default_value == known[catch_all_field.name]
value_to_write: Any
if received_default and len(unknown) == 0:
value_to_write = default_value
elif received_default and len(unknown) > 0:
value_to_write = unknown
elif already_parsed:
# Did not receive default
value_to_write = known[catch_all_field.name]
if len(unknown) > 0:
value_to_write.update(unknown)
else:
error_message = f"Received input field with " \
f"same name as catch-all field: " \
f"'{catch_all_field.name}': " \
f"'{known[catch_all_field.name]}'"
raise UndefinedParameterError(error_message)
else:
value_to_write = unknown
known[catch_all_field.name] = value_to_write
return known
| true | 2 |
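_CatchAllUndefinedParameters backs Undefined.INCLUDE together with a CatchAll-typed field; unknown keys land in that field instead of being dropped or raised (Open is an invented class):

from dataclasses import dataclass
from dataclasses_json import dataclass_json, Undefined, CatchAll

@dataclass_json(undefined=Undefined.INCLUDE)
@dataclass
class Open:
    a: int
    extras: CatchAll  # receives everything not matching a declared field

o = Open.from_dict({"a": 1, "b": 2})
print(o.extras)  # {'b': 2}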
45 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_to_dict
|
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
catch_all_field = \
_CatchAllUndefinedParameters._get_catch_all_field(obj)
undefined_parameters = kvs.pop(catch_all_field.name)
if isinstance(undefined_parameters, dict):
kvs.update(
undefined_parameters) # If desired handle letter case here
return kvs
|
[
193,
200
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_to_dict(obj, kvs: Dict[Any, Any]) -> Dict[Any, Any]:
catch_all_field = \
_CatchAllUndefinedParameters._get_catch_all_field(obj)
undefined_parameters = kvs.pop(catch_all_field.name)
if isinstance(undefined_parameters, dict):
kvs.update(
undefined_parameters) # If desired handle letter case here
return kvs
| true | 2 |
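handle_to_dict reverses the capture: on serialization the catch-all mapping is popped and flattened back into the output dict. Continuing the Open sketch above:

o = Open.from_dict({"a": 1, "b": 2})
print(o.to_dict())  # {'a': 1, 'b': 2} – 'extras' itself disappears from the output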
46 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
handle_dump
|
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=obj)
return getattr(obj, catch_all_field.name)
|
[
203,
206
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def handle_dump(obj) -> Dict[Any, Any]:
catch_all_field = _CatchAllUndefinedParameters._get_catch_all_field(
cls=obj)
return getattr(obj, catch_all_field.name)
| false | 0 |
47 |
dataclasses_json
|
dataclasses_json.undefined
|
_CatchAllUndefinedParameters
|
create_init
|
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _catch_all_init(self, *args, **kwargs):
known_kwargs, unknown_kwargs = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
if _CatchAllUndefinedParameters._get_catch_all_field(
obj).name not in known_kwargs:
num_params_takeable -= 1
num_args_takeable = num_params_takeable - len(known_kwargs)
args, unknown_args = args[:num_args_takeable], args[
num_args_takeable:]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
unknown_args = {f"_UNKNOWN{i}": v for i, v in
enumerate(unknown_args)}
arguments = bound_parameters.arguments
arguments.update(unknown_args)
arguments.update(unknown_kwargs)
arguments.pop("self", None)
final_parameters = _CatchAllUndefinedParameters.handle_from_dict(
obj, arguments)
original_init(self, **final_parameters)
return _catch_all_init
|
[
209,
240
] | false |
[
"KnownParameters",
"UnknownParameters",
"CatchAll"
] |
import abc
import dataclasses
import functools
import inspect
from dataclasses import Field, fields
from typing import Any, Callable, Dict, Optional, Tuple
from enum import Enum
from marshmallow import ValidationError
from dataclasses_json.utils import CatchAllVar
KnownParameters = Dict[str, Any]
UnknownParameters = Dict[str, Any]
CatchAll = Optional[CatchAllVar]
class _CatchAllUndefinedParameters(_UndefinedParameterAction):
@staticmethod
def create_init(obj) -> Callable:
original_init = obj.__init__
init_signature = inspect.signature(original_init)
@functools.wraps(obj.__init__)
def _catch_all_init(self, *args, **kwargs):
known_kwargs, unknown_kwargs = \
_CatchAllUndefinedParameters._separate_defined_undefined_kvs(
obj, kwargs)
num_params_takeable = len(
init_signature.parameters) - 1 # don't count self
if _CatchAllUndefinedParameters._get_catch_all_field(
obj).name not in known_kwargs:
num_params_takeable -= 1
num_args_takeable = num_params_takeable - len(known_kwargs)
args, unknown_args = args[:num_args_takeable], args[
num_args_takeable:]
bound_parameters = init_signature.bind_partial(self, *args,
**known_kwargs)
unknown_args = {f"_UNKNOWN{i}": v for i, v in
enumerate(unknown_args)}
arguments = bound_parameters.arguments
arguments.update(unknown_args)
arguments.update(unknown_kwargs)
arguments.pop("self", None)
final_parameters = _CatchAllUndefinedParameters.handle_from_dict(
obj, arguments)
original_init(self, **final_parameters)
return _catch_all_init
| true | 2 |
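Because create_init replaces __init__, the capture also works for direct construction, not just from_dict — again with the Open sketch, assuming the decorator wires this in as shown:

o = Open(1, b=2)  # unknown kwarg routed into the catch-all field by _catch_all_init
print(o.extras)   # {'b': 2}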
48 |
docstring_parser
|
docstring_parser.common
|
DocstringMeta
|
__init__
|
def __init__(self, args: T.List[str], description: str) -> None:
"""Initialize self.
:param args: list of arguments. The exact content of this variable is
dependent on the kind of docstring; it's used to distinguish between
custom docstring meta information items.
:param description: associated docstring description.
"""
self.args = args
self.description = description
|
[
32,
41
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringMeta:
def __init__(self, args: T.List[str], description: str) -> None:
"""Initialize self.
:param args: list of arguments. The exact content of this variable is
dependent on the kind of docstring; it's used to distinguish between
custom docstring meta information items.
:param description: associated docstring description.
"""
self.args = args
self.description = description
| false | 0 |
49 |
docstring_parser
|
docstring_parser.common
|
DocstringParam
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
arg_name: str,
type_name: T.Optional[str],
is_optional: T.Optional[bool],
default: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.arg_name = arg_name
self.type_name = type_name
self.is_optional = is_optional
self.default = default
|
[
47,
61
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringParam(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
arg_name: str,
type_name: T.Optional[str],
is_optional: T.Optional[bool],
default: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.arg_name = arg_name
self.type_name = type_name
self.is_optional = is_optional
self.default = default
| false | 0 |
50 |
docstring_parser
|
docstring_parser.common
|
DocstringReturns
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
is_generator: bool,
return_name: T.Optional[str] = None,
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.is_generator = is_generator
self.return_name = return_name
|
[
67,
79
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringReturns(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
is_generator: bool,
return_name: T.Optional[str] = None,
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.is_generator = is_generator
self.return_name = return_name
| false | 0 |
51 |
docstring_parser
|
docstring_parser.common
|
DocstringRaises
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.description = description
|
[
85,
94
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringRaises(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
type_name: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.type_name = type_name
self.description = description
| false | 0 |
52 |
docstring_parser
|
docstring_parser.common
|
DocstringDeprecated
|
__init__
|
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
version: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.version = version
self.description = description
|
[
100,
109
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class DocstringDeprecated(DocstringMeta):
def __init__(
self,
args: T.List[str],
description: T.Optional[str],
version: T.Optional[str],
) -> None:
"""Initialize self."""
super().__init__(args, description)
self.version = version
self.description = description
| false | 0 |
53 |
docstring_parser
|
docstring_parser.common
|
Docstring
|
__init__
|
def __init__(self) -> None:
"""Initialize self."""
self.short_description = None # type: T.Optional[str]
self.long_description = None # type: T.Optional[str]
self.blank_after_short_description = False
self.blank_after_long_description = False
self.meta = []
|
[
115,
121
] | false |
[
"PARAM_KEYWORDS",
"RAISES_KEYWORDS",
"RETURNS_KEYWORDS",
"YIELDS_KEYWORDS"
] |
import typing as T
PARAM_KEYWORDS = {
"param",
"parameter",
"arg",
"argument",
"attribute",
"key",
"keyword",
}
RAISES_KEYWORDS = {"raises", "raise", "except", "exception"}
RETURNS_KEYWORDS = {"return", "returns"}
YIELDS_KEYWORDS = {"yield", "yields"}
class Docstring:
def __init__(self) -> None:
"""Initialize self."""
self.short_description = None # type: T.Optional[str]
self.long_description = None # type: T.Optional[str]
self.blank_after_short_description = False
self.blank_after_long_description = False
self.meta = []
| false | 0 |
54 |
docstring_parser
|
docstring_parser.google
|
GoogleParser
|
add_section
|
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
|
[
174,
181
] | false |
[
"GOOGLE_TYPED_ARG_REGEX",
"GOOGLE_ARG_DESC_REGEX",
"MULTIPLE_PATTERN",
"DEFAULT_SECTIONS"
] |
import inspect
import re
import typing as T
from collections import namedtuple, OrderedDict
from enum import IntEnum
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)")
GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. Defaults to (.+)\.")
MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)")
DEFAULT_SECTIONS = [
Section("Arguments", "param", SectionType.MULTIPLE),
Section("Args", "param", SectionType.MULTIPLE),
Section("Parameters", "param", SectionType.MULTIPLE),
Section("Params", "param", SectionType.MULTIPLE),
Section("Raises", "raises", SectionType.MULTIPLE),
Section("Exceptions", "raises", SectionType.MULTIPLE),
Section("Except", "raises", SectionType.MULTIPLE),
Section("Attributes", "attribute", SectionType.MULTIPLE),
Section("Example", "examples", SectionType.SINGULAR),
Section("Examples", "examples", SectionType.SINGULAR),
Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE),
Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE),
]
class GoogleParser:
def __init__(
self, sections: T.Optional[T.List[Section]] = None, title_colon=True
):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
:param title_colon: require colon after section title.
"""
if not sections:
sections = DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self.title_colon = title_colon
self._setup()
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
| false | 0 |
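add_section makes the set of recognized titles extensible at runtime; a sketch registering a custom singular section (the "Side Effects" title and "side_effects" key are invented, and the expected output assumes the generic DocstringMeta path for unknown keys):

from docstring_parser.google import GoogleParser, Section, SectionType

parser = GoogleParser()
parser.add_section(Section("Side Effects", "side_effects", SectionType.SINGULAR))
doc = parser.parse("Do it.\n\nSide Effects:\n    Mutates the cache.")
print(doc.meta[0].description)  # Mutates the cache.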
55 |
docstring_parser
|
docstring_parser.google
|
GoogleParser
|
parse
|
def parse(self, text: str) -> Docstring:
"""Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(self.titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = OrderedDict()
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in self.sections:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError('Can\'t infer indent from "{}"'.format(chunk))
indent = indent_match.group()
# Check for singular elements
if self.sections[title].type in [
SectionType.SINGULAR,
SectionType.SINGULAR_OR_MULTIPLE,
]:
part = inspect.cleandoc(chunk)
ret.meta.append(self._build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(
'No specification for "{}": "{}"'.format(title, chunk)
)
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(self._build_meta(part, title))
return ret
|
[
183,
265
] | false |
[
"GOOGLE_TYPED_ARG_REGEX",
"GOOGLE_ARG_DESC_REGEX",
"MULTIPLE_PATTERN",
"DEFAULT_SECTIONS"
] |
import inspect
import re
import typing as T
from collections import namedtuple, OrderedDict
from enum import IntEnum
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)")
GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. Defaults to (.+)\.")
MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)")
DEFAULT_SECTIONS = [
Section("Arguments", "param", SectionType.MULTIPLE),
Section("Args", "param", SectionType.MULTIPLE),
Section("Parameters", "param", SectionType.MULTIPLE),
Section("Params", "param", SectionType.MULTIPLE),
Section("Raises", "raises", SectionType.MULTIPLE),
Section("Exceptions", "raises", SectionType.MULTIPLE),
Section("Except", "raises", SectionType.MULTIPLE),
Section("Attributes", "attribute", SectionType.MULTIPLE),
Section("Example", "examples", SectionType.SINGULAR),
Section("Examples", "examples", SectionType.SINGULAR),
Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE),
Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE),
]
class GoogleParser:
def __init__(
self, sections: T.Optional[T.List[Section]] = None, title_colon=True
):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
:param title_colon: require colon after section title.
"""
if not sections:
sections = DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self.title_colon = title_colon
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(self.titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = OrderedDict()
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in self.sections:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError('Can\'t infer indent from "{}"'.format(chunk))
indent = indent_match.group()
# Check for singular elements
if self.sections[title].type in [
SectionType.SINGULAR,
SectionType.SINGULAR_OR_MULTIPLE,
]:
part = inspect.cleandoc(chunk)
ret.meta.append(self._build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(
'No specification for "{}": "{}"'.format(title, chunk)
)
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(self._build_meta(part, title))
return ret
| true | 2 |
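An end-to-end sketch of parse() on a small Google-style docstring (invented content):

from docstring_parser.google import GoogleParser

doc = GoogleParser().parse(
    "Add two numbers.\n"
    "\n"
    "Args:\n"
    "    a (int): first addend.\n"
    "    b (int): second addend.\n"
    "\n"
    "Returns:\n"
    "    int: the sum.\n"
)
print(doc.short_description)  # Add two numbers.
print(len(doc.meta))          # 3 – two param entries plus one returns entry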
56 |
docstring_parser
|
docstring_parser.numpydoc
|
parse
|
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
|
[
325,
330
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
| false | 0 |
|
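The module-level parse() is a thin convenience over NumpydocParser; a sketch on a small numpy-style docstring, exercising the PARAM_OPTIONAL_REGEX and PARAM_DEFAULT_REGEX paths shown above:

from docstring_parser.numpydoc import parse

doc = parse(
    "Scale a value.\n"
    "\n"
    "Parameters\n"
    "----------\n"
    "factor : int, optional\n"
    "    Multiplier, defaults to 1\n"
)
param = doc.meta[0]
print(param.arg_name, param.type_name, param.default)  # factor int 1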
57 |
docstring_parser
|
docstring_parser.numpydoc
|
Section
|
__init__
|
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
|
[
57,
59
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
| false | 0 |
58 |
docstring_parser
|
docstring_parser.numpydoc
|
Section
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
|
[
70,
76
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
| false | 0 |
59 |
docstring_parser
|
docstring_parser.numpydoc
|
_KVSection
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
|
[
93,
98
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class _KVSection(Section):
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
| true | 2 |
60 |
docstring_parser
|
docstring_parser.numpydoc
|
DeprecationSection
|
parse
|
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
|
[
209,
215
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class DeprecationSection(_SphinxSection):
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
| true | 2 |
61 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
__init__
|
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
|
[
256,
263
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
| false | 0 |
62 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
add_section
|
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
|
[
271,
278
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class Section:
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
| false | 0 |
63 |
docstring_parser
|
docstring_parser.numpydoc
|
NumpydocParser
|
parse
|
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
|
[
280,
322
] | false |
[
"KV_REGEX",
"PARAM_KEY_REGEX",
"PARAM_OPTIONAL_REGEX",
"PARAM_DEFAULT_REGEX",
"RETURN_KEY_REGEX",
"DEFAULT_SECTIONS"
] |
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
)
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
| true | 2 |
64 |
docstring_parser
|
docstring_parser.parser
|
parse
|
def parse(text: str, style: Style = Style.auto) -> Docstring:
"""Parse the docstring into its components.
:param text: docstring text to parse
:param style: docstring style
:returns: parsed docstring representation
"""
if style != Style.auto:
return STYLES[style](text)
rets = []
for parse_ in STYLES.values():
try:
rets.append(parse_(text))
except ParseError as e:
exc = e
if not rets:
raise exc
return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0]
|
[
6,
24
] | false |
[] |
from docstring_parser.common import Docstring, ParseError
from docstring_parser.styles import STYLES, Style
def parse(text: str, style: Style = Style.auto) -> Docstring:
"""Parse the docstring into its components.
:param text: docstring text to parse
:param style: docstring style
:returns: parsed docstring representation
"""
if style != Style.auto:
return STYLES[style](text)
rets = []
for parse_ in STYLES.values():
try:
rets.append(parse_(text))
except ParseError as e:
exc = e
if not rets:
raise exc
return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0]
| true | 2 |
|
65 |
docstring_parser
|
docstring_parser.rest
|
parse
|
def parse(text: str) -> Docstring:
"""Parse the ReST-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
text = inspect.cleandoc(text)
match = re.search("^:", text, flags=re.M)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match in re.finditer(
r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M
):
chunk = match.group(0)
if not chunk:
continue
try:
args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1)
except ValueError:
raise ParseError(
'Error parsing meta information near "{}".'.format(chunk)
)
args = args_chunk.split()
desc = desc_chunk.strip()
if "\n" in desc:
first_line, rest = desc.split("\n", 1)
desc = first_line + "\n" + inspect.cleandoc(rest)
ret.meta.append(_build_meta(args, desc))
return ret
|
[
85,
131
] | false |
[] |
import inspect
import re
import typing as T
from .common import (
PARAM_KEYWORDS,
RAISES_KEYWORDS,
RETURNS_KEYWORDS,
YIELDS_KEYWORDS,
Docstring,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
ParseError,
)
def parse(text: str) -> Docstring:
"""Parse the ReST-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
text = inspect.cleandoc(text)
match = re.search("^:", text, flags=re.M)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match in re.finditer(
r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M
):
chunk = match.group(0)
if not chunk:
continue
try:
args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1)
except ValueError:
raise ParseError(
'Error parsing meta information near "{}".'.format(chunk)
)
args = args_chunk.split()
desc = desc_chunk.strip()
if "\n" in desc:
first_line, rest = desc.split("\n", 1)
desc = first_line + "\n" + inspect.cleandoc(rest)
ret.meta.append(_build_meta(args, desc))
return ret
| true | 2 |
|
66 |
flutes
|
flutes.iterator
|
chunk
|
def chunk(n: int, iterable: Iterable[T]) -> Iterator[List[T]]:
r"""Split the iterable into chunks, with each chunk containing no more than ``n`` elements.
.. code:: python
>>> list(chunk(3, range(10)))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
:param n: The maximum number of elements in one chunk.
:param iterable: The iterable.
:return: An iterator over chunks.
"""
if n <= 0:
raise ValueError("`n` should be positive")
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if len(group) > 0:
yield group
|
[
22,
43
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def chunk(n: int, iterable: Iterable[T]) -> Iterator[List[T]]:
r"""Split the iterable into chunks, with each chunk containing no more than ``n`` elements.
.. code:: python
>>> list(chunk(3, range(10)))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
:param n: The maximum number of elements in one chunk.
:param iterable: The iterable.
:return: An iterator over chunks.
"""
if n <= 0:
raise ValueError("`n` should be positive")
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if len(group) > 0:
yield group
| true | 2 |
|
67 |
flutes
|
flutes.iterator
|
take
|
def take(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Take the first :attr:`n` elements from an iterable.
.. code:: python
>>> list(take(5, range(1000000)))
[0, 1, 2, 3, 4]
:param n: The number of elements to take.
:param iterable: The iterable.
:return: An iterator returning the first :attr:`n` elements from the iterable.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
yield next(it)
except StopIteration:
pass
|
[
46,
65
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def take(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Take the first :attr:`n` elements from an iterable.
.. code:: python
>>> list(take(5, range(1000000)))
[0, 1, 2, 3, 4]
:param n: The number of elements to take.
:param iterable: The iterable.
:return: An iterator returning the first :attr:`n` elements from the iterable.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
yield next(it)
except StopIteration:
pass
| true | 2 |
|
68 |
flutes
|
flutes.iterator
|
drop
|
def drop(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Drop the first :attr:`n` elements from an iterable, and return the rest as an iterator.
.. code:: python
>>> next(drop(5, range(1000000)))
5
:param n: The number of elements to drop.
:param iterable: The iterable.
:return: An iterator returning the remaining part of the iterable after the first :attr:`n` elements.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
next(it)
yield from it
except StopIteration:
pass
|
[
68,
88
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def drop(n: int, iterable: Iterable[T]) -> Iterator[T]:
r"""Drop the first :attr:`n` elements from an iterable, and return the rest as an iterator.
.. code:: python
>>> next(drop(5, range(1000000)))
5
:param n: The number of elements to drop.
:param iterable: The iterable.
:return: An iterator returning the remaining part of the iterable after the first :attr:`n` elements.
"""
if n < 0:
raise ValueError("`n` should be non-negative")
try:
it = iter(iterable)
for _ in range(n):
next(it)
yield from it
except StopIteration:
pass
| true | 2 |
|
69 |
flutes
|
flutes.iterator
|
drop_until
|
def drop_until(pred_fn: Callable[[T], bool], iterable: Iterable[T]) -> Iterator[T]:
r"""Drop elements from the iterable until an element that satisfies the predicate is encountered. Similar to the
built-in :py:func:`filter` function, but only applied to a prefix of the iterable.
.. code:: python
>>> list(drop_until(lambda x: x > 5, range(10)))
[6, 7, 8, 9]
:param pred_fn: The predicate that returned elements should satisfy.
:param iterable: The iterable.
:return: The iterator after dropping elements.
"""
iterator = iter(iterable)
for item in iterator:
if not pred_fn(item):
continue
yield item
break
yield from iterator
|
[
91,
110
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def drop_until(pred_fn: Callable[[T], bool], iterable: Iterable[T]) -> Iterator[T]:
r"""Drop elements from the iterable until an element that satisfies the predicate is encountered. Similar to the
built-in :py:func:`filter` function, but only applied to a prefix of the iterable.
.. code:: python
>>> list(drop_until(lambda x: x > 5, range(10)))
[6, 7, 8, 9]
:param pred_fn: The predicate that returned elements should satisfy.
:param iterable: The iterable.
:return: The iterator after dropping elements.
"""
iterator = iter(iterable)
for item in iterator:
if not pred_fn(item):
continue
yield item
break
yield from iterator
| true | 2 |
|
70 |
flutes
|
flutes.iterator
|
split_by
|
def split_by(iterable: Iterable[A], empty_segments: bool = False, *, criterion=None, separator=None) \
-> Iterator[List[A]]:
r"""Split a list into sub-lists by dropping certain elements. Exactly one of ``criterion`` and ``separator`` must be
specified. For example:
.. code:: python
>>> list(split_by(range(10), criterion=lambda x: x % 3 == 0))
[[1, 2], [4, 5], [7, 8]]
>>> list(split_by(" Split by: ", empty_segments=True, separator='.'))
[[], ['S', 'p', 'l', 'i', 't'], ['b', 'y', ':'], []]
:param iterable: The list to split.
:param empty_segments: If ``True``, include an empty list in cases where two adjacent elements satisfy
the criterion.
:param criterion: The criterion to decide whether to drop an element.
:param separator: The separator for sub-lists. An element is dropped if it is equal to ``parameter``.
:return: List of sub-lists.
"""
if not ((criterion is None) ^ (separator is None)):
raise ValueError("Exactly one of `criterion` and `separator` should be specified")
if criterion is None:
criterion = lambda x: x == separator
group = []
for x in iterable:
if not criterion(x):
group.append(x)
else:
if len(group) > 0 or empty_segments:
yield group
group = []
if len(group) > 0 or empty_segments:
yield group
|
[
123,
156
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def split_by(iterable: Iterable[A], empty_segments: bool = False, *, criterion=None, separator=None) \
-> Iterator[List[A]]:
r"""Split a list into sub-lists by dropping certain elements. Exactly one of ``criterion`` and ``separator`` must be
specified. For example:
.. code:: python
>>> list(split_by(range(10), criterion=lambda x: x % 3 == 0))
[[1, 2], [4, 5], [7, 8]]
>>> list(split_by(" Split by: ", empty_segments=True, separator='.'))
[[], ['S', 'p', 'l', 'i', 't'], ['b', 'y', ':'], []]
:param iterable: The list to split.
:param empty_segments: If ``True``, include an empty list in cases where two adjacent elements satisfy
the criterion.
:param criterion: The criterion to decide whether to drop an element.
:param separator: The separator for sub-lists. An element is dropped if it is equal to ``parameter``.
:return: List of sub-lists.
"""
if not ((criterion is None) ^ (separator is None)):
raise ValueError("Exactly one of `criterion` and `separator` should be specified")
if criterion is None:
criterion = lambda x: x == separator
group = []
for x in iterable:
if not criterion(x):
group.append(x)
else:
if len(group) > 0 or empty_segments:
yield group
group = []
if len(group) > 0 or empty_segments:
yield group
| true | 2 |
|
71 |
flutes
|
flutes.iterator
|
scanl
|
def scanl(func, iterable, *args):
r"""Computes the intermediate results of :py:func:`~functools.reduce`. Equivalent to Haskell's ``scanl``. For
example:
.. code:: python
>>> list(scanl(operator.add, [1, 2, 3, 4], 0))
[0, 1, 3, 6, 10]
>>> list(scanl(lambda s, x: x + s, ['a', 'b', 'c', 'd']))
['a', 'ba', 'cba', 'dcba']
Learn more at `Learn You a Haskell: Higher Order Functions <http://learnyouahaskell.com/higher-order-functions>`_.
:param func: The function to apply. This should be a binary function where the arguments are: the accumulator,
and the current element.
:param iterable: The list of elements to iteratively apply the function to.
:param initial: The initial value for the accumulator. If not supplied, the first element in the list is used.
:return: The intermediate results at each step.
"""
iterable = iter(iterable)
if len(args) == 1:
acc = args[0]
elif len(args) == 0:
acc = next(iterable)
else:
raise ValueError("Too many arguments")
yield acc
for x in iterable:
acc = func(acc, x)
yield acc
|
[
167,
196
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
def scanl(func, iterable, *args):
r"""Computes the intermediate results of :py:func:`~functools.reduce`. Equivalent to Haskell's ``scanl``. For
example:
.. code:: python
>>> list(scanl(operator.add, [1, 2, 3, 4], 0))
[0, 1, 3, 6, 10]
>>> list(scanl(lambda s, x: x + s, ['a', 'b', 'c', 'd']))
['a', 'ba', 'cba', 'dcba']
Learn more at `Learn You a Haskell: Higher Order Functions <http://learnyouahaskell.com/higher-order-functions>`_.
:param func: The function to apply. This should be a binary function where the arguments are: the accumulator,
and the current element.
:param iterable: The list of elements to iteratively apply the function to.
:param initial: The initial value for the accumulator. If not supplied, the first element in the list is used.
:return: The intermediate results at each step.
"""
iterable = iter(iterable)
if len(args) == 1:
acc = args[0]
elif len(args) == 0:
acc = next(iterable)
else:
raise ValueError("Too many arguments")
yield acc
for x in iterable:
acc = func(acc, x)
yield acc
| true | 2 |
|
72 |
flutes
|
flutes.iterator
|
LazyList
|
__iter__
|
def __iter__(self):
if self.exhausted:
return iter(self.list)
return self.LazyListIterator(self)
|
[
257,
260
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __iter__(self):
if self.exhausted:
return iter(self.list)
return self.LazyListIterator(self)
| true | 2 |
73 |
flutes
|
flutes.iterator
|
LazyList
|
__getitem__
|
def __getitem__(self, idx):
if isinstance(idx, slice):
self._fetch_until(idx.stop)
else:
self._fetch_until(idx)
return self.list[idx]
|
[
280,
285
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __getitem__(self, idx):
if isinstance(idx, slice):
self._fetch_until(idx.stop)
else:
self._fetch_until(idx)
return self.list[idx]
| true | 2 |
74 |
flutes
|
flutes.iterator
|
LazyList
|
__len__
|
def __len__(self):
if self.exhausted:
return len(self.list)
else:
raise TypeError("__len__ is not available before the iterable is depleted")
|
[
287,
291
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class LazyList(Generic[T], Sequence[T]):
def __init__(self, iterable: Iterable[T]):
self.iter = iter(iterable)
self.exhausted = False
self.list: List[T] = []
def __len__(self):
if self.exhausted:
return len(self.list)
else:
raise TypeError("__len__ is not available before the iterable is depleted")
| true | 2 |
75 |
flutes
|
flutes.iterator
|
Range
|
__next__
|
def __next__(self) -> int:
if self.val >= self.r:
raise StopIteration
result = self.val
self.val += self.step
return result
|
[
332,
337
] | false |
[
"__all__",
"T",
"A",
"B",
"R"
] |
import weakref
from typing import Callable, Generic, Iterable, Iterator, List, Optional, Sequence, TypeVar, overload
__all__ = [
"chunk",
"take",
"drop",
"drop_until",
"split_by",
"scanl",
"scanr",
"LazyList",
"Range",
"MapList",
]
T = TypeVar('T')
A = TypeVar('A')
B = TypeVar('B')
R = TypeVar('R')
class Range(Sequence[int]):
def __init__(self, *args):
if len(args) == 0 or len(args) > 3:
raise ValueError("Range should be called the same way as the builtin `range`")
if len(args) == 1:
self.l = 0
self.r = args[0]
self.step = 1
else:
self.l = args[0]
self.r = args[1]
self.step = 1 if len(args) == 2 else args[2]
self.val = self.l
self.length = (self.r - self.l) // self.step
def __next__(self) -> int:
if self.val >= self.r:
raise StopIteration
result = self.val
self.val += self.step
return result
| true | 2 |
⚠️ Note: The dataset `symprompt_supp.jsonl` was not created by us. We only supplemented it with additional branch-level metadata (e.g., `has_branch`, `total_branches`) to enable coverage testing.
These fields let users determine up front whether a focal method contains any branches, keeping coverage workflows clean and simplifying branch-coverage calculation (see the sketch below).
It originates from the paper:
Code-Aware Prompting: A Study of Coverage Guided Test Generation in Regression Setting using LLM
by Gabriel Ryan, Siddhartha Jain, Mingyue Shang, Shiqi Wang, Xiaofei Ma, Murali Krishna Ramanathan, Baishakhi Ray.
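
As a usage reference, here is a minimal sketch of loading the JSONL and using the supplementary fields to build a branch-coverage denominator. The field names (`has_branch`, `total_branches`) come from the schema shown in the preview above; the local file path is an assumption and should point at wherever the dataset was downloaded.

```python
import json

# Assumed local path; adjust to where symprompt_supp.jsonl was downloaded.
DATASET_PATH = "symprompt_supp.jsonl"

def load_records(path):
    """Yield one record (dict) per non-empty line of the JSONL file."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

records = list(load_records(DATASET_PATH))

# Methods with has_branch == False have no branches to cover, so they can
# be skipped entirely when computing branch coverage.
branchy = [r for r in records if r["has_branch"]]
denominator = sum(r["total_branches"] for r in branchy)

print(f"{len(branchy)}/{len(records)} focal methods contain branches")
print(f"total branches (coverage denominator): {denominator}")
```

Branch coverage for a generated test suite can then be reported as the number of branches it exercises divided by this denominator.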