code | package | path | filename
---|---|---|---|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, TypedDict, Union
from .finite import (FiniteAdjunction, FiniteDP, FiniteFunctor, FiniteGroup, FiniteMap, FiniteMonoid,
FiniteNaturalTransformation, FinitePoset, FiniteProfunctor, FiniteRelation,
FiniteSemigroup, FiniteSet)
__all__ = [
"FiniteSet_desc",
"FiniteMap_desc",
"FiniteSetUnion_desc",
"FiniteSetProduct_desc",
"FiniteSetDisjointUnion_desc",
"DirectElements_desc",
"FiniteProfunctor_desc",
"IOHelper",
"FiniteRelation_desc",
"FiniteFunctor_desc",
"FiniteSemigroup_desc",
"FiniteGroup_desc",
"FiniteMonoid_desc",
"FiniteSetRepresentation",
"FiniteMapRepresentation",
"FiniteAdjunction_desc",
"FiniteGroupRepresentation",
"FiniteAdjunctionRepresentation",
"FinitePosetRepresentation",
"FiniteNaturalTransformationRepresentation",
"FiniteFunctorRepresentation",
"FiniteRelationRepresentation",
"FiniteProfunctorRepresentation",
"FiniteMonoidRepresentation",
"FiniteSemigroupRepresentation",
"FiniteDPRepresentation",
]
from .helper import IOHelper
from .types import ConcreteRepr
class DirectElements_desc(TypedDict):
elements: List[ConcreteRepr]
class FiniteSetProduct_desc(TypedDict):
product: List[FiniteSet_desc]
class FiniteSetDisjointUnion_desc(TypedDict):
disunion: List[FiniteSet_desc]
class FiniteSetUnion_desc(TypedDict):
union: List[FiniteSet_desc]
FiniteSet_desc = Union[
DirectElements_desc,
FiniteSetProduct_desc,
FiniteSetUnion_desc,
FiniteSetDisjointUnion_desc,
]
class FiniteMap_desc(TypedDict):
source: FiniteSet_desc
target: FiniteSet_desc
values: List[List[ConcreteRepr]]
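# --- Illustrative examples (not part of the interface) ---
# A hedged sketch of what concrete description dictionaries could look like.
# The sample values are arbitrary, and the pair layout of "values" (one
# [source element, target element] pair per entry) is an assumption.
_example_set_desc: FiniteSet_desc = {"elements": [1, 2, 3]}
_example_map_desc: FiniteMap_desc = {
    "source": {"elements": [1, 2]},
    "target": {"elements": ["a", "b"]},
    "values": [[1, "a"], [2, "b"]],
}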
class FiniteMapRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, s: FiniteMap_desc) -> FiniteMap:
...
@abstractmethod
def save(self, h: IOHelper, m: FiniteMap) -> FiniteMap_desc:
...
class FiniteSetRepresentation(ABC):
# @overload
# def load(self, h: IOHelper, data: FiniteSetDisjointUnion_desc) \
# -> FiniteSetDisjointUnion:
# ...
#
# @overload
# def load(self, h: IOHelper, data: FiniteSetUnion_desc) -> FiniteSet:
# ...
#
# @overload
# def load(self, h: IOHelper, data: FiniteSetProduct_desc) -> FiniteSetProduct:
# ...
@abstractmethod
def load(self, h: IOHelper, data: FiniteSet_desc) -> FiniteSet:
""" Load a finite set from data structure.
Throw InvalidFormat if the format is incorrect.
"""
@abstractmethod
def save(self, h: IOHelper, f: FiniteSet) -> FiniteSet_desc:
""" Serializes into a data structure """
class FiniteSemigroup_desc(TypedDict):
carrier: FiniteSet_desc
compose: FiniteMap_desc
class FiniteSemigroupRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, s: FiniteSemigroup_desc) -> FiniteSemigroup:
""" Load the data """
@abstractmethod
def save(self, h: IOHelper, m: FiniteSemigroup) -> FiniteSemigroup_desc:
""" Save the data """
class FiniteNaturalTransformation_desc(TypedDict):
pass
class FiniteMonoid_desc(FiniteSemigroup_desc):
# carrier: FiniteSet_desc
# compose: FiniteMap_desc
neutral: object
class FiniteMonoidRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, s: FiniteMonoid_desc) -> FiniteMonoid:
""" Load the data """
@abstractmethod
def save(self, h: IOHelper, m: FiniteMonoid) -> FiniteMonoid_desc:
""" Save the data """
class FiniteGroup_desc(FiniteMonoid_desc):
# carrier: FiniteSet_desc
# compose: FiniteMap_desc
# neutral: object
inv: FiniteMap_desc
class FinitePoset_desc(TypedDict):
pass
class FiniteGroupRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, s: FiniteGroup_desc) -> FiniteGroup:
""" Load the data """
@abstractmethod
def save(self, h: IOHelper, m: FiniteGroup) -> FiniteGroup_desc:
""" Save the data """
class FinitePosetRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, s: FinitePoset_desc) -> FinitePoset:
""" Load the data """
@abstractmethod
def save(self, h: IOHelper, m: FinitePoset) -> FinitePoset_desc:
""" Save the data """
class FiniteRelation_desc(TypedDict):
source: FiniteSet_desc
target: FiniteSet_desc
values: List[List[object]]
class FiniteRelationRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, data: FiniteRelation_desc) -> FiniteRelation:
""" Load a finite set from given YAML data"""
@abstractmethod
def save(self, h: IOHelper, f: FiniteRelation) -> FiniteRelation_desc:
""" Load a finite set from given YAML data"""
class FiniteCategory_desc(TypedDict):
...
class FiniteFunctor_desc(TypedDict):
source: FiniteCategory_desc
target: FiniteCategory_desc
f_ob: FiniteMap_desc
f_mor: FiniteMap_desc
class FiniteFunctorRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, data: FiniteFunctor_desc) -> FiniteFunctor:
...
@abstractmethod
def save(self, h: IOHelper, f: FiniteFunctor) -> FiniteFunctor_desc:
...
class FiniteNaturalTransformationRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, data: FiniteNaturalTransformation_desc) -> FiniteNaturalTransformation:
...
@abstractmethod
def save(self, h: IOHelper, f: FiniteNaturalTransformation) -> FiniteNaturalTransformation_desc:
...
class FiniteAdjunction_desc(TypedDict):
...
class FiniteAdjunctionRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, data: FiniteAdjunction_desc) -> FiniteAdjunction:
...
@abstractmethod
def save(self, h: IOHelper, f: FiniteAdjunction) -> FiniteAdjunction_desc:
...
class FiniteProfunctor_desc(TypedDict):
...
class FiniteProfunctorRepresentation(ABC):
@abstractmethod
def load(self, h: IOHelper, data: FiniteProfunctor_desc) -> FiniteProfunctor:
...
@abstractmethod
def save(self, h: IOHelper, f: FiniteProfunctor) -> FiniteProfunctor_desc:
...
class FiniteDP_desc(TypedDict):
pass
class FiniteDPRepresentation(ABC):
@abstractmethod
def load(self, yaml_data: FiniteDP_desc) -> FiniteDP:
...
@abstractmethod
def save(self, f: FiniteDP) -> FiniteDP_desc:
...
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/representations.py | representations.py |
from __future__ import annotations
from abc import ABC, abstractmethod
__all__ = ['IOHelper']
class IOHelper(ABC):
@abstractmethod
def loadfile(self, name: str) -> dict:
""" Load from the filesystem. """
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/helper.py | helper.py |
from abc import ABC
from dataclasses import dataclass
from typing import List
__all__ = ["Component", "Rope", "Elastic", "Brick", "Sphere", "Solution", "RupeGoldbergSolver"]
@dataclass
class Component:
mass: float
length: float
spring_const_pull: float
spring_const_push: float
max_compression: float
max_extension: float
@dataclass
class Rope(Component):
length: float
@dataclass
class Elastic(Rope):
elasticity: float # meaning?
@dataclass
class Brick(Component):
pass
# add other components / properties
@dataclass
class Sphere(Component):
pass
# add other components / properties
@dataclass
class Solution:
# does it break?
breaks: bool
# what is the total length (if not broken)?
total_length: float
class RupeGoldbergSolver(ABC):
def hangthem(self, components: List[Component]) -> Solution:
""" What if we hang the first component, and let the others hang below it? """
def push(self, components: List[Component], force: float) -> Solution:
""" What if we fix one endpoint, and we compress it? """
def pull(self, components: List[Component], force: float) -> Solution:
""" What if we fix one endpoint, and we pull the other end? """
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/rope.py | rope.py |
from abc import ABC, abstractmethod
from typing import Callable
from .finite import Setoid
__all__ = []
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/generic.py | generic.py |
__version__ = "2021.1.2103061245"
__data__ = ""
from .types import *
from .finite import *
from .generic import *
from .rope import *
from .representations import *
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/__init__.py | __init__.py |
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Iterator, List, Optional, overload, Set, Tuple
from .helper import IOHelper
from .types import ConcreteRepr, Element, Morphism, Object
__all__ = [
"InvalidFormat",
"InvalidValue",
"EnumerableSetsOperations",
"EnumerableSet",
"FiniteSetUnion",
"Setoid",
"SetoidOperations",
"SetProduct",
"SetDisjointUnion",
"MakeSetUnion",
"MakeSetDisjointUnion",
"MakeSetIntersection",
"MakeSetProduct",
"MonotoneMap",
"Mapping",
"Monoid",
"MonoidalCategory",
"MonoidalCategory",
"FiniteMap",
"FiniteDP",
"FiniteSet",
"FiniteSetProperties",
"FiniteSetProduct",
"FiniteProfunctor",
"FiniteSetDisjointUnion",
"FiniteFunctor",
"FiniteSemigroupConstruct",
"FiniteSemigroup",
"FinitePoset",
"FiniteAdjunction",
"FiniteMonoidalPoset",
"FiniteMonoidalCategory",
"FiniteCategory",
"FiniteEnrichedCategory",
"FiniteRelation",
"FiniteSemiCategory",
"FiniteMonoid",
"FiniteCategoryOperations",
"FiniteDPOperations",
"FiniteAdjunctionsOperations",
"FiniteProfunctorOperations",
"FiniteGroup",
"FiniteLattice",
"FiniteRelationProperties",
"FiniteRelationOperations",
"FiniteEndorelationOperations",
"FiniteMonotoneMapProperties",
"FiniteEndorelationProperties",
"FinitePosetOperations",
"FinitePosetConstructors",
"FinitePosetSubsetProperties",
"FinitePosetProperties",
"FiniteMapOperations",
"FinitePosetSubsetOperations",
"FiniteNaturalTransformation",
"Semigroup",
"Lattice", "JoinSemilattice", "Adjunction",
'MeetSemilattice', "MonoidalPosetOperations", "SemiCategory",
"SemiBiCategory", "Poset", "Category", "CategoryOperations", 'DP', 'DPCategory', 'DPI',
"DPConstructors", "InvalidFormat",
]
class Setoid(ABC):
"""
A setoid is something to which elements may belong,
has a way of distinguishing elements,
and is able to (de)serialize its elements.
"""
@abstractmethod
def contains(self, x: Element) -> bool:
""" Returns true if the element is in the set. """
def equal(self, x: Element, y: Element) -> bool:
""" Returns True if the two elements are to be considered equal. """
return x == y # default is to use the Python equality
def apart(self, x: Element, y: Element) -> bool:
return not self.equal(x, y)
@abstractmethod
def save(self, h: IOHelper, x: Element) -> ConcreteRepr:
...
@abstractmethod
def load(self, h: IOHelper, o: ConcreteRepr) -> Element:
...
class Mapping(ABC):
@abstractmethod
def source(self) -> Setoid:
...
@abstractmethod
def target(self) -> Setoid:
...
@abstractmethod
def __call__(self, a: Element) -> Element:
...
class EnumerableSet(Setoid, ABC):
@abstractmethod
def elements(self) -> Iterator[Element]:
""" Note: possibly non-terminating. """
class FiniteSet(EnumerableSet, ABC):
""" A finite set has a finite size. """
@abstractmethod
def size(self) -> int:
""" Return the size of the finite set. """
class FiniteMap(Mapping, ABC):
@abstractmethod
def source(self) -> FiniteSet:
...
@abstractmethod
def target(self) -> FiniteSet:
...
class InvalidFormat(Exception):
""" Raise this if the input data to parse is invalid. """
class InvalidValue(ValueError):
""" Raise this if the input does not make sense. """
class FiniteSetProperties(ABC):
@abstractmethod
def is_subset(self, a: FiniteSet, b: FiniteSet) -> bool:
""" True if `a` is a subset of `b`. """
def is_equal(self, a: FiniteSet, b: FiniteSet) -> bool:
return self.is_subset(a, b) and self.is_subset(b, a)
def is_strict_subset(self, a: FiniteSet, b: FiniteSet) -> bool:
return self.is_subset(a, b) and not self.is_subset(b, a)
class SetProduct(Setoid, ABC):
""" A set product is a setoid that can be factorized. """
@abstractmethod
def components(self) -> List[Setoid]:
""" Returns the components of the product"""
@abstractmethod
def pack(self, *args: Element) -> Element:
""" Packs an element of each setoid into an element of the mapping"""
@abstractmethod
def projections(self) -> List[Mapping]:
""" Returns the projection mappings. """
class FiniteSetProduct(FiniteSet, SetProduct, ABC):
""" Specialization of SetProduct where we deal with FiniteSets"""
@abstractmethod
def components(self) -> List[FiniteSet]:
""" Returns the components """
@abstractmethod
def projections(self) -> List[FiniteMap]:
""" Returns the projection mappings. """
class SetUnion(Setoid, ABC):
""" A set product is a setoid that can be factorized. """
@abstractmethod
def components(self) -> List[Setoid]:
""" Returns the components of the union"""
class EnumerableSetUnion(EnumerableSet, SetUnion, ABC):
""" Specialization of SetUnion where we deal with FiniteSets"""
@abstractmethod
def components(self) -> List[EnumerableSet]:
""" Returns the components of the union """
class FiniteSetUnion(FiniteSet, EnumerableSetUnion, ABC):
""" Specialization of SetUnion where we deal with FiniteSets"""
@abstractmethod
def components(self) -> List[FiniteSet]:
""" Returns the components of the union """
class SetDisjointUnion(Setoid, ABC):
@abstractmethod
def components(self) -> List[Setoid]:
""" Returns the components of the union """
@abstractmethod
def injections(self) -> List[Mapping]:
""" Returns the projection mappings. """
class FiniteSetDisjointUnion(FiniteSet, SetDisjointUnion, ABC):
""" Specialization of SetProduct where we deal with FiniteSets"""
@abstractmethod
def components(self) -> List[FiniteSet]:
...
@abstractmethod
def injections(self) -> List[FiniteMap]:
...
class MakeSetProduct(ABC):
@overload
def product(self, components: List[Setoid]) -> SetProduct:
...
@abstractmethod
def product(self, components: List[FiniteSet]) -> FiniteSetProduct:
...
class MakeSetIntersection(ABC):
@abstractmethod
def intersection(self, components: List[FiniteSet]) -> FiniteSet:
...
class MakeSetUnion(ABC):
@overload
def union(self, components: List[FiniteSet]) -> FiniteSetUnion:
...
@overload
def union(self, components: List[EnumerableSet]) -> EnumerableSetUnion:
...
@abstractmethod
def union(self, components: List[Setoid]) -> SetUnion:
...
# added
class MakeSetDisjointUnion(ABC):
@overload
def compute_disjoint_union(self, components: List[Setoid]) -> SetDisjointUnion:
...
@abstractmethod
def compute_disjoint_union(
self, components: List[FiniteSet]
) -> FiniteSetDisjointUnion:
...
class Relation(ABC):
@abstractmethod
def source(self) -> Setoid:
""" Returns a setoid """
@abstractmethod
def target(self) -> Setoid:
""" Returns a setoid """
@abstractmethod
def holds(self, a: Element, b: Element) -> bool:
""" Returns true if the two elements are related """
class FiniteRelation(Relation, ABC):
@abstractmethod
def source(self) -> FiniteSet:
""" Returns a finite set"""
@abstractmethod
def target(self) -> FiniteSet:
""" Returns a finite set"""
class FiniteRelationProperties(ABC):
@abstractmethod
def is_surjective(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is surjective. """
@abstractmethod
def is_injective(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is injective. """
@abstractmethod
def is_defined_everywhere(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is defined everywhere. """
@abstractmethod
def is_single_valued(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is single-valued """
class FiniteRelationOperations(ABC):
@abstractmethod
def transpose(self, fr: FiniteRelation) -> FiniteRelation:
""" Create the transposed of a relation """
class FiniteEndorelationProperties(ABC):
@abstractmethod
def is_reflexive(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is reflexive. """
@abstractmethod
def is_irreflexive(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is irreflexive. """
@abstractmethod
def is_transitive(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is transitive. """
@abstractmethod
def is_symmetric(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is symmetric """
@abstractmethod
def is_antisymmetric(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is antisymmetric """
@abstractmethod
def is_asymmetric(self, fr: FiniteRelation) -> bool:
""" Return True if the relation is asymmetric """
class FiniteEndorelationOperations(ABC):
@abstractmethod
def transitive_closure(self, fr: FiniteRelation) -> FiniteRelation:
""" Returns transitive closure """
class FiniteMapOperations(ABC):
@abstractmethod
def compose(self, f: FiniteMap, g: FiniteMap) -> FiniteMap:
""" compose two functions"""
@abstractmethod
def as_relation(self, f: FiniteMap) -> FiniteRelation:
""" Load the data """
class Semigroup(ABC):
@abstractmethod
def carrier(self) -> Setoid:
...
@abstractmethod
def compose(self, a: Element, b: Element) -> Element:
...
class FiniteSemigroup(Semigroup, ABC):
@abstractmethod
def carrier(self) -> FiniteSet:
...
class FiniteSemigroupConstruct(ABC):
@abstractmethod
def free(self, fs: FiniteSet) -> FiniteSemigroup:
""" Construct the free semigroup on a set. """
class Monoid(Semigroup, ABC):
@abstractmethod
def identity(self) -> Element:
...
class Group(Monoid, ABC):
@abstractmethod
def inverse(self, e: Element) -> Element:
""" Returns the inverse of an element"""
class FiniteMonoid(Monoid, FiniteSemigroup, ABC):
""""""
class FiniteGroup(Group, FiniteMonoid, ABC):
...
# TODO: equational theories
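# --- Illustrative sketch (not part of the interface) ---
# The cyclic group Z/2Z under addition modulo 2, reusing the _ListFiniteSet
# sketch above; purely to show how the FiniteGroup methods fit together.
class _Z2(FiniteGroup):
    def carrier(self) -> FiniteSet:
        return _ListFiniteSet([0, 1])

    def compose(self, a: Element, b: Element) -> Element:
        return (a + b) % 2

    def identity(self) -> Element:
        return 0

    def inverse(self, e: Element) -> Element:
        # Every element of Z/2Z is its own inverse.
        return e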
class Poset(ABC):
@abstractmethod
def carrier(self) -> Setoid:
...
@abstractmethod
def leq(self, a: Element, b: Element) -> bool:
...
class FinitePoset(Poset, ABC):
""" Implementation of finite posets. """
@abstractmethod
def carrier(self) -> FiniteSet:
...
class FinitePosetProperties(ABC):
@abstractmethod
def width(self, fp: FinitePoset) -> int:
""" Return the width of the poset. """
@abstractmethod
def height(self, fp: FinitePoset) -> int:
""" Return the height of the poset. """
class FinitePosetConstructors(ABC):
@abstractmethod
def discrete(self, s: FiniteSet) -> FinitePoset:
""" Creates the discrete poset from any set. """
@abstractmethod
def powerset(self, s: FiniteSet) -> FinitePoset:
""" Creates the powerset poset """
@abstractmethod
def uppersets(self, s: FinitePoset) -> FinitePoset:
""" Creates the upperset poset """
@abstractmethod
def lowersets(self, s: FinitePoset) -> FinitePoset:
""" Creates the lowersets poset """
@abstractmethod
def antichains(self, s: FinitePoset) -> FiniteSet:
""" Creates the antichain set """
@abstractmethod
def intervals(self, s: FinitePoset) -> FinitePoset:
""" Computes the poset of intervals. """
@abstractmethod
def intervals2(self, s: FinitePoset) -> FinitePoset:
""" Computes the other of intervals. """
class FinitePosetSubsetProperties(ABC):
@abstractmethod
def is_chain(self, fp: FinitePoset, s: FiniteSet) -> bool:
""" Computes if the subset is a chain. """
@abstractmethod
def is_antichain(self, fp: FinitePoset, s: FiniteSet) -> bool:
""" Computes if the subset is an antichain. """
class FinitePosetSubsetOperations(ABC):
@abstractmethod
def upperclosure(self, fp: FinitePoset, s: Set[Element]) -> Set[Element]:
""" Computes the upper closure of an element"""
@abstractmethod
def lowerclosure(self, fp: FinitePoset, s: Set[Element]) -> Set[Element]:
""" Computes the lower closure of an element"""
@abstractmethod
def maximal(self, fp: FinitePoset, s: Set[Element]) -> Set[Element]:
""" Computes the maximal elements in a subset of the poset"""
@abstractmethod
def minimal(self, fp: FinitePoset, s: Set[Element]) -> Set[Element]:
""" Computes the minimal elements in a subset of the poset"""
@abstractmethod
def infimum(self, fp: FinitePoset, s: Set[Element]) -> Optional[Element]:
""" Computes the infimum for the subset, or None if one does not exist. """
@abstractmethod
def supremum(self, fp: FinitePoset, s: Set[Element]) -> Optional[Element]:
""" Computes the supremum for the subset, or None if one does not exist. """
@abstractmethod
def meet(self, fp: FinitePoset, s: Set[Element]) -> Optional[Element]:
""" Computes the meet for the subset, or None if one does not exist. """
@abstractmethod
def join(self, fp: FinitePoset, s: Set[Element]) -> Optional[Element]:
""" Computes the join for the subset, or None if one does not exist. """
class FinitePosetOperations(ABC):
@abstractmethod
def opposite(self, p: FinitePoset) -> FinitePoset:
...
@abstractmethod
def product(self, p1: FinitePoset, p2: FinitePoset) -> FinitePoset:
...
@abstractmethod
def disjoint_union(self, p1: FinitePoset, p2: FinitePoset) -> FinitePoset:
...
class MonotoneMap(Mapping, ABC):
def source_poset(self) -> Poset:
...
def target_poset(self) -> Poset:
...
class FiniteMonotoneMapProperties(ABC):
@abstractmethod
def is_monotone(self, p1: FinitePoset, p2: FinitePoset, m: FiniteMap) -> bool:
""" Check if a map is monotone. """
@abstractmethod
def is_antitone(self, p1: FinitePoset, p2: FinitePoset, m: FiniteMap) -> bool:
""" Check if a map is antitone. """
class FiniteMonoidalPoset(ABC):
""" Implementation of finite posets. """
@abstractmethod
def poset(self) -> FinitePoset:
...
@abstractmethod
def monoid(self) -> FiniteMonoid:
...
class MonoidalPosetOperations(ABC):
""" Implementation of finite posets. """
@abstractmethod
def is_monoidal_poset(self, fp: FinitePoset, fm: FiniteMonoid) -> bool:
""" Check that the pair of poset and monoid make together a monoidal poset."""
class MeetSemilattice(Poset, ABC):
@abstractmethod
def meet(self, x: Element, y: Element) -> Element:
...
@abstractmethod
def top(self) -> Element:
...
class JoinSemilattice(Poset, ABC):
@abstractmethod
def join(self, x: Element, y: Element) -> Element:
...
@abstractmethod
def bottom(self) -> Element:
...
class Lattice(JoinSemilattice, MeetSemilattice, ABC):
pass
class FiniteLattice(ABC):
@abstractmethod
def carrier(self) -> FiniteSet:
...
class SemiBiCategory(ABC):
@abstractmethod
def objects(self) -> Setoid:
...
@abstractmethod
def homs(self, ob1: Object, ob2: Object) -> Setoid:
...
@abstractmethod
def legs(self, m: Morphism) -> Tuple[Object, Object]:
""" Return source and target of the morphism """
class SemiCategory(SemiBiCategory, ABC):
...
class Category(SemiCategory, ABC):
@abstractmethod
def identity(self, ob: Object) -> Morphism:
""" Identity for the object """
class FiniteSemiCategory(SemiCategory, ABC):
@abstractmethod
def objects(self) -> FiniteSet:
...
@abstractmethod
def homs(self, ob1: Object, ob2: Object) -> FiniteSet:
...
class FiniteCategory(FiniteSemiCategory, Category, ABC):
...
class CategoryOperations(ABC):
@abstractmethod
def product(self, c1: Category, c2: Category) -> Category:
""" Product of two categories. """
@abstractmethod
def disjoint_union(self, c1: Category, c2: Category) -> FiniteCategory:
""" Disjoint union for the categories """
@abstractmethod
def arrow(self, c1: Category) -> Category:
""" Computes the arrow category """
@abstractmethod
def twisted_arrow(self, c1: Category) -> Category:
""" Computes the twisted arrow category """
class FiniteCategoryOperations(ABC):
@abstractmethod
def product(self, c1: FiniteCategory, c2: FiniteCategory) -> FiniteCategory:
""" Product of two categories. """
@abstractmethod
def disjoint_union(self, c1: FiniteCategory, c2: FiniteCategory) -> FiniteCategory:
""" Disjoint union for the categories """
@abstractmethod
def arrow(self, c1: FiniteCategory) -> FiniteCategory:
""" Computes the arrow category """
@abstractmethod
def twisted_arrow(self, c1: FiniteCategory) -> FiniteCategory:
""" Computes the twisted arrow category """
class Functor(ABC):
@abstractmethod
def source(self) -> Category:
...
@abstractmethod
def target(self) -> Category:
...
@abstractmethod
def f_ob(self, ob: Object) -> Object:
""" Effect on objects """
@abstractmethod
def f_mor(self, m: Morphism) -> Morphism:
""" Effect on morphisms """
class FiniteFunctor(Functor, ABC):
@abstractmethod
def source(self) -> FiniteCategory:
...
@abstractmethod
def target(self) -> FiniteCategory:
...
class MonoidalCategory(Category, ABC):
@abstractmethod
def monoidal_unit(self) -> Object:
""" Return the product functor. """
@abstractmethod
def monoidal_product(self) -> FiniteFunctor:
""" Return the product functor. """
class FiniteMonoidalCategory(MonoidalCategory, FiniteCategory, ABC):
...
class NaturalTransformation(ABC):
@abstractmethod
def cat1(self) -> Category:
...
@abstractmethod
def cat2(self) -> Category:
...
@abstractmethod
def component(self, ob: Object) -> Morphism:
"""Returns the component for a particular object in the first category.
This is a morphism in the second category.
"""
class FiniteNaturalTransformation(NaturalTransformation, ABC):
@abstractmethod
def cat1(self) -> FiniteCategory:
...
@abstractmethod
def cat2(self) -> FiniteCategory:
...
class Adjunction(ABC):
@abstractmethod
def source(self) -> Category:
...
@abstractmethod
def target(self) -> Category:
...
@abstractmethod
def left(self) -> Functor:
pass
@abstractmethod
def right(self) -> Functor:
pass
class FiniteAdjunction(Adjunction, ABC):
@abstractmethod
def source(self) -> FiniteCategory:
...
@abstractmethod
def target(self) -> FiniteCategory:
...
@abstractmethod
def left(self) -> FiniteFunctor:
pass
@abstractmethod
def right(self) -> FiniteFunctor:
pass
class FiniteAdjunctionsOperations(ABC):
@abstractmethod
def is_adjunction(self, left: FiniteFunctor, right: FiniteFunctor) -> bool:
""" check the pair is an adjunction """
@abstractmethod
def compose(
self, adj1: FiniteAdjunction, adj2: FiniteAdjunction
) -> FiniteAdjunction:
""" compose two compatible adjunctions"""
@abstractmethod
def from_relation(self, f: FiniteRelation) -> FiniteAdjunction:
...
class DPI(ABC):
@abstractmethod
def functionality(self) -> Poset:
...
@abstractmethod
def implementations(self) -> Setoid:
...
@abstractmethod
def costs(self) -> Poset:
...
@abstractmethod
def requires(self) -> Mapping:
...
@abstractmethod
def provides(self) -> Mapping:
...
class DPCategory(Category, ABC):
pass
class DP(ABC):
pass
class FiniteDP(ABC):
pass
class DPConstructors(ABC):
@abstractmethod
def companion(self, f: MonotoneMap) -> DP:
pass
@abstractmethod
def conjoint(self, f: MonotoneMap) -> DP:
pass
class FiniteDPOperations(ABC):
@abstractmethod
def series(self, dp1: FiniteDP, dp2: FiniteDP) -> FiniteDP:
pass
@abstractmethod
def union(self, dp1: FiniteDP, dp2: FiniteDP) -> FiniteDP:
pass
@abstractmethod
def intersection(self, dp1: FiniteDP, dp2: FiniteDP) -> FiniteDP:
pass
@abstractmethod
def from_relation(self, f: FiniteRelation) -> FiniteDP:
...
class Profunctor(ABC):
def source(self) -> Category:
...
def target(self) -> Category:
...
def functor(self) -> Functor:
...
class FiniteProfunctor(ABC):
def cat1(self) -> FiniteCategory:
...
def cat2(self) -> FiniteCategory:
...
def functor(self) -> FiniteFunctor:
...
class FiniteProfunctorOperations(ABC):
@abstractmethod
def series(self, p1: FiniteProfunctor, p2: FiniteProfunctor) -> FiniteProfunctor:
...
class FiniteEnrichedCategory(FiniteCategory, ABC):
def enrichment(self) -> FiniteMonoidalCategory:
...
class SetoidOperations(ABC):
@classmethod
@abstractmethod
def union_setoids(cls, a: Setoid, b: Setoid) -> Setoid:
""" Creates the union of two Setoids. """
@classmethod
@abstractmethod
def intersection_setoids(cls, a: Setoid, b: Setoid) -> Setoid:
""" Creates the intersection of two Setoids. """
class EnumerableSetsOperations(ABC):
@classmethod
@abstractmethod
def make_set_sequence(cls, f: Callable[[int], object]):
"""Creates an EnumerableSet from a function that gives the
i-th element."""
@classmethod
@abstractmethod
def union_esets(cls, a: EnumerableSet, b: EnumerableSet) -> EnumerableSet:
""" Creates the union of two EnumerableSet. """
| ACT4E-exercises | /ACT4E-exercises-2021.1.2103061245.tar.gz/ACT4E-exercises-2021.1.2103061245/src/act4e_interfaces/finite.py | finite.py |
# This branch is currently non-functional due to changes to the core. Please use the R branch for the time being.
# Installation
### Setting Up the Environment (Preinstallation)
**For Linux Users**
Verify that the CMake version you are using is >= 3.19.
For optimal performance on Intel-based architectures, installing the [Intel Math Kernel Library (MKL)](https://software.intel.com/content/www/us/en/develop/articles/intel-math-kernel-library-intel-mkl-2020-install-guide.html) is **highly** recommended. After installing, make sure `MKLROOT` is defined by running the [setvars](https://software.intel.com/content/www/us/en/develop/documentation/using-configuration-file-for-setvars-sh/top.html) script.
**Install library dependencies**
To install the `ACTIONet` dependencies on Debian-based Linux machines, run:
```bash
sudo apt-get install libhdf5-dev libsuitesparse-dev libnss3 xvfb
```
For Mac-based systems, you can use [brew](https://brew.sh/) instead:
```bash
brew install hdf5 suite-sparse c-blosc
```
### Installing ACTIONet Python Package
Use `pip` to install ACTIONet directly from this repository:
```bash
pip install git+https://github.com/shmohammadi86/ACTIONet@python-devel
```
To install from source:
```bash
git clone --recurse-submodules https://github.com/shmohammadi86/ACTIONet.git
git submodule update --init
python setup.py build
python setup.py develop
```
# Running ACTIONet
**Note** If you are using `MKL`, make sure to properly [set the number of threads](https://software.intel.com/content/www/us/en/develop/documentation/mkl-macos-developer-guide/top/managing-performance-and-memory/improving-performance-with-threading/techniques-to-set-the-number-of-threads.html) used prior to running `ACTIONet`.
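For example, one possible approach (a sketch; the specific environment variables and the thread count shown are assumptions, see the linked guide for the full set of options) is to fix the thread count before the first import:
```python
import os

# Cap MKL/OpenMP at 8 threads; this must happen before ACTIONet (or NumPy) is imported.
os.environ["MKL_NUM_THREADS"] = "8"
os.environ["OMP_NUM_THREADS"] = "8"

import ACTIONet as an
```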
## Example Run
Here is a simple example to get you started:
```python
import urllib.request
import ACTIONet as an
import scanpy as sc
# Download example dataset from the 10X Genomics website
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve('http://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_10k_v3/pbmc_10k_v3_filtered_feature_bc_matrix.h5', 'pbmc_10k_v3.h5')
# Read and filter the data
adata = sc.read_10x_h5('pbmc_10k_v3.h5')
adata.var_names_make_unique(join='.')
an.pp.filter_adata(adata, min_cells_per_feature=0.01, min_features_per_cell=1000)
sc.pp.normalize_total(adata)
sc.pp.log1p(adata)
# Run ACTIONet
an.pp.reduce_kernel(adata)
an.run_ACTIONet(adata)
# Annotate cell-types
marker_genes, directions, names = an.tl.load_markers('PBMC_Monaco2019_12celltypes')
cell_labels, confidences, Z = an.tl.annotate_cells_using_markers(adata, marker_genes, directions, names)
adata.obs['celltypes'] = cell_labels
# Visualize output
an.pl.plot_ACTIONet(adata, 'celltypes', transparency_key='node_centrality')
# Export results
adata.write('pbmc_10k_v3.h5ad')
```
## Visualizing results using cellxgene
The output of ACTIONet in the Python implementation is internally stored as an `AnnData` object, and R `ACE` objects can be imported from/exported to `AnnData` using the `AnnData2ACE()` and `ACE2AnnData()` functions, respectively. `AnnData` objects can be loaded directly into the [cellxgene](https://github.com/chanzuckerberg/cellxgene) package, an open-source viewer for interactive single-cell data visualization. `cellxgene` can be installed as:
```bash
pip install cellxgene
```
Then to visualize the results of ACTIONet, run:
```bash
cellxgene launch pbmc_10k_v3.h5ad
```
where *pbmc_10k_v3.h5ad* is the name of the file we exported using the `adata.write()` function.
# Additional tutorials
You can access ACTIONet tutorials from:
1. [ACTIONet framework at a glance (human PBMC 3k dataset)](http://compbio.mit.edu/ACTIONet/tutorials/mini_intro.html)
2. [Introduction to the ACTIONet framework (human PBMC Granja et al. dataset)](http://compbio.mit.edu/ACTIONet/tutorials/intro.html)
3. [Introduction to cluster-centric analysis using the ACTIONet framework](http://compbio.mit.edu/ACTIONet/tutorials/clustering.html)
4. [To batch correct or not to batch correct, that is the question!](http://compbio.mit.edu/ACTIONet/tutorials/batch.html)
5. [PortingData: Import/export options in the ACTIONet framework](http://compbio.mit.edu/ACTIONet/tutorials/porting_data.html)
6. [Interactive visualization, annotation, and exploration](http://compbio.mit.edu/ACTIONet/tutorials/annotation.html)
7. [Constructing cell-type/cell-state-specific networks using SCINET](http://compbio.mit.edu/ACTIONet/tutorials/scinet.html)
You can also find a [step-by-step guide](http://compbio.mit.edu/ACTIONet/tutorials/guide.html) to learning the core functionalities of the ACTIONet framework.
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/README.md | README.md |
import multiprocessing
import os
import platform
import re
import sys
import subprocess
import distutils.ccompiler
from distutils.version import LooseVersion
from setuptools import find_packages, setup, Extension
from setuptools.command.build_ext import build_ext
__version__ = '0.1.1'
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N = max(1, multiprocessing.cpu_count() - 2)
import multiprocessing.pool
def _single_compile(obj):
try: src, ext = build[obj]
except KeyError: return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
return objects
distutils.ccompiler.CCompiler.compile=parallelCCompile
def read(path):
with open(path, 'r') as f:
return f.read()
def get_numpy_include():
import numpy
return numpy.get_include()
# Classes below were taken from https://github.com/pybind/cmake_example
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DNUMPY_INCLUDE_DIRS=' + get_numpy_include(),
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j' + str(multiprocessing.cpu_count())]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='ACTIONet',
version=__version__,
author='Shahin Mohammadi',
author_email='shahin.mohammadi@gmail.com',
url='https://github.com/shmohammadi86/ACTIONet',
description='ACTIONet single-cell analysis framework',
long_description=read('README.md'),
long_description_content_type='text/markdown',
keywords='',
python_requires='>=3.8',
packages=find_packages(),
ext_modules=[CMakeExtension('_ACTIONet')],
cmdclass=dict(build_ext=CMakeBuild),
install_requires=read('requirements.txt').strip().split('\n'),
setup_requires=['numpy'],
zip_safe=False,
include_package_data=True,
classifiers=[],
)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/setup.py | setup.py |
Make sure you've completed the following steps before submitting your issue -- thank you!
1. Check if your question has already been answered in the [FAQ](http://pybind11.readthedocs.io/en/latest/faq.html) section.
2. Make sure you've read the [documentation](http://pybind11.readthedocs.io/en/latest/). Your issue may be addressed there.
3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room](https://gitter.im/pybind/Lobby).
4. If you have a genuine bug report or a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
5. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.
*After reading, remove this checklist and the template text in parentheses below.*
## Issue description
(Provide a short description, state the expected behavior and what actually happens.)
## Reproducible example code
(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/ISSUE_TEMPLATE.md | ISSUE_TEMPLATE.md |

# pybind11 — Seamless operability between C++11 and Python
[](http://pybind11.readthedocs.org/en/master/?badge=master)
[](http://pybind11.readthedocs.org/en/stable/?badge=stable)
[](https://gitter.im/pybind/Lobby)
[](https://travis-ci.org/pybind/pybind11)
[](https://ci.appveyor.com/project/wjakob/pybind11)
**pybind11** is a lightweight header-only library that exposes C++ types in Python
and vice versa, mainly to create Python bindings of existing C++ code. Its
goals and syntax are similar to the excellent
[Boost.Python](http://www.boost.org/doc/libs/1_58_0/libs/python/doc/) library
by David Abrahams: to minimize boilerplate code in traditional extension
modules by inferring type information using compile-time introspection.
The main issue with Boost.Python—and the reason for creating such a similar
project—is Boost. Boost is an enormously large and complex suite of utility
libraries that works with almost every C++ compiler in existence. This
compatibility has its cost: arcane template tricks and workarounds are
necessary to support the oldest and buggiest of compiler specimens. Now that
C++11-compatible compilers are widely available, this heavy machinery has
become an excessively large and unnecessary dependency.
Think of this library as a tiny self-contained version of Boost.Python with
everything stripped away that isn't relevant for binding generation. Without
comments, the core header files only require ~4K lines of code and depend on
Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
compact implementation was possible thanks to some of the new C++11 language
features (specifically: tuples, lambda functions and variadic templates). Since
its creation, this library has grown beyond Boost.Python in many ways, leading
to dramatically simpler binding code in many common situations.
Tutorial and reference documentation is provided at
[http://pybind11.readthedocs.org/en/master](http://pybind11.readthedocs.org/en/master).
A PDF version of the manual is available
[here](https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf).
## Core features
pybind11 can map the following core C++ features to Python
- Functions accepting and returning custom data structures per value, reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended in Python
## Goodies
In addition to the core functionality, pybind11 provides some extra goodies:
- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an
implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured variables. The
lambda capture data is stored inside the resulting Python function object.
- pybind11 uses C++11 move constructors and move assignment operators whenever
possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
Pythons' buffer protocols. This is handy e.g. for fast conversion between
C++ matrix classes like Eigen and NumPy without expensive copy operations.
- pybind11 can automatically vectorize functions so that they are transparently
applied to all entries of one or more NumPy array arguments.
- Python's slice-based access and assignment operations can be supported with
just a few lines of code.
- Everything is contained in just a few header files; there is no need to link
against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11 conversion
of PyRosetta, an enormous Boost.Python binding project,
[reported](http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf) a binary
size reduction of **5.4x** and compile time reduction by **5.8x**.
- Function signatures are precomputed at compile time (using ``constexpr``),
leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled similar to
regular Python objects.
## Supported compilers
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11 v2.0 and a [workaround](https://github.com/pybind/pybind11/issues/276))
5. Cygwin/GCC (tested on 2.5.1)
## About
This project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob).
Significant features and/or improvements to the code were contributed by
Jonas Adler,
Lori A. Burns,
Sylvain Corlay,
Trent Houliston,
Axel Huebl,
@hulucc,
Sergey Lyskov,
Johan Mabille,
Tomasz Miąsko,
Dean Moldovan,
Ben Pritchard,
Jason Rhinelander,
Boris Schäling,
Pim Schellart,
Henry Schreiner,
Ivan Smirnov, and
Patrick Stewart.
### License
pybind11 is provided under a BSD-style license that can be found in the
``LICENSE`` file. By using, distributing, or contributing to this project,
you agree to the terms and conditions of this license.
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/README.md | README.md |
Thank you for your interest in this project! Please refer to the following
sections on how to contribute code and bug reports.
### Reporting bugs
At the moment, this project is run in the spare time of a single person
([Wenzel Jakob](http://rgl.epfl.ch/people/wjakob)) with very limited resources
for issue tracker tickets. Thus, before submitting a question or bug report,
please take a moment of your time and ensure that your issue isn't already
discussed in the project documentation provided at
[http://pybind11.readthedocs.org/en/latest](http://pybind11.readthedocs.org/en/latest).
Assuming that you have identified a previously unknown problem or an important
question, it's essential that you submit a self-contained and minimal piece of
code that reproduces the problem. In other words: no external dependencies,
isolate the function(s) that cause breakage, submit matched and complete C++
and Python snippets that can be easily compiled and run on my end.
## Pull requests
Contributions are submitted, reviewed, and accepted using Github pull requests.
Please refer to [this
article](https://help.github.com/articles/using-pull-requests) for details and
adhere to the following rules to make the process as smooth as possible:
* Make a new branch for every feature you're working on.
* Make small and clean pull requests that are easy to review but make sure they
do add value by themselves.
* Add tests for any new functionality and run the test suite (``make pytest``)
to ensure that no existing features break.
* Please run ``flake8`` and ``tools/check-style.sh`` to check your code matches
the project style. (Note that ``check-style.sh`` requires ``gawk``.)
* This project has a strong focus on providing general solutions using a
minimal amount of code, thus small pull requests are greatly preferred.
### Licensing of contributions
pybind11 is provided under a BSD-style license that can be found in the
``LICENSE`` file. By using, distributing, or contributing to this project, you
agree to the terms and conditions of this license.
You are under no obligation whatsoever to provide any bug fixes, patches, or
upgrades to the features, functionality or performance of the source code
("Enhancements") to anyone; however, if you choose to make your Enhancements
available either publicly, or directly to the author of this software, without
imposing a separate written license agreement for such Enhancements, then you
hereby grant the following license: a non-exclusive, royalty-free perpetual
license to install, use, modify, prepare derivative works, incorporate into
other computer software, distribute, and sublicense such enhancements or
derivative works thereof, in binary and source code form.
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/CONTRIBUTING.md | CONTRIBUTING.md |
#!/usr/bin/env python
# Setup script for PyPI; use CMakeFile.txt to build extension modules
from setuptools import setup
from distutils.command.install_headers import install_headers
from distutils.command.build_py import build_py
from pybind11 import __version__
import os
package_data = [
'include/pybind11/detail/class.h',
'include/pybind11/detail/common.h',
'include/pybind11/detail/descr.h',
'include/pybind11/detail/init.h',
'include/pybind11/detail/internals.h',
'include/pybind11/detail/typeid.h',
'include/pybind11/attr.h',
'include/pybind11/buffer_info.h',
'include/pybind11/cast.h',
'include/pybind11/chrono.h',
'include/pybind11/common.h',
'include/pybind11/complex.h',
'include/pybind11/eigen.h',
'include/pybind11/embed.h',
'include/pybind11/eval.h',
'include/pybind11/functional.h',
'include/pybind11/iostream.h',
'include/pybind11/numpy.h',
'include/pybind11/operators.h',
'include/pybind11/options.h',
'include/pybind11/pybind11.h',
'include/pybind11/pytypes.h',
'include/pybind11/stl.h',
'include/pybind11/stl_bind.h',
]
# Prevent installation of pybind11 headers by setting
# PYBIND11_USE_CMAKE.
if os.environ.get('PYBIND11_USE_CMAKE'):
headers = []
else:
headers = package_data
class InstallHeaders(install_headers):
"""Use custom header installer because the default one flattens subdirectories"""
def run(self):
if not self.distribution.headers:
return
for header in self.distribution.headers:
subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))
install_dir = os.path.join(self.install_dir, subdir)
self.mkpath(install_dir)
(out, _) = self.copy_file(header, install_dir)
self.outfiles.append(out)
# Install the headers inside the package as well
class BuildPy(build_py):
def build_package_data(self):
build_py.build_package_data(self)
for header in package_data:
target = os.path.join(self.build_lib, 'pybind11', header)
self.mkpath(os.path.dirname(target))
self.copy_file(header, target, preserve_mode=False)
setup(
name='pybind11',
version=__version__,
description='Seamless operability between C++11 and Python',
author='Wenzel Jakob',
author_email='wenzel.jakob@epfl.ch',
url='https://github.com/pybind/pybind11',
download_url='https://github.com/pybind/pybind11/tarball/v' + __version__,
packages=['pybind11'],
license='BSD',
headers=headers,
zip_safe=False,
cmdclass=dict(install_headers=InstallHeaders, build_py=BuildPy),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Programming Language :: C++',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: BSD License'
],
keywords='C++11, Python bindings',
long_description="""pybind11 is a lightweight header-only library that
exposes C++ types in Python and vice versa, mainly to create Python bindings of
existing C++ code. Its goals and syntax are similar to the excellent
Boost.Python by David Abrahams: to minimize boilerplate code in traditional
extension modules by inferring type information using compile-time
introspection.
The main issue with Boost.Python-and the reason for creating such a similar
project-is Boost. Boost is an enormously large and complex suite of utility
libraries that works with almost every C++ compiler in existence. This
compatibility has its cost: arcane template tricks and workarounds are
necessary to support the oldest and buggiest of compiler specimens. Now that
C++11-compatible compilers are widely available, this heavy machinery has
become an excessively large and unnecessary dependency.
Think of this library as a tiny self-contained version of Boost.Python with
everything stripped away that isn't relevant for binding generation. Without
comments, the core header files only require ~4K lines of code and depend on
Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
compact implementation was possible thanks to some of the new C++11 language
features (specifically: tuples, lambda functions and variadic templates). Since
its creation, this library has grown beyond Boost.Python in many ways, leading
to dramatically simpler binding code in many common situations.""")
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/setup.py | setup.py |
Frequently asked questions
##########################
"ImportError: dynamic module does not define init function"
===========================================================
1. Make sure that the name specified in PYBIND11_MODULE is identical to the
filename of the extension library (without prefixes such as .so)
2. If the above did not fix the issue, you are likely using an incompatible
version of Python (for instance, the extension library was compiled against
Python 2, while the interpreter is running on top of some version of Python
3, or vice versa).
"Symbol not found: ``__Py_ZeroStruct`` / ``_PyInstanceMethod_Type``"
========================================================================
See the first answer.
"SystemError: dynamic module not initialized properly"
======================================================
See the first answer.
The Python interpreter immediately crashes when importing my module
===================================================================
See the first answer.
CMake doesn't detect the right Python version
=============================================
The CMake-based build system will try to automatically detect the installed
version of Python and link against that. When this fails, or when there are
multiple versions of Python and it finds the wrong one, delete
``CMakeCache.txt`` and then invoke CMake as follows:
.. code-block:: bash
cmake -DPYTHON_EXECUTABLE:FILEPATH=<path-to-python-executable> .
.. _faq_reference_arguments:
Limitations involving reference arguments
=========================================
In C++, it's fairly common to pass arguments using mutable references or
mutable pointers, which allows both read and write access to the value
supplied by the caller. This is sometimes done for efficiency reasons, or to
realize functions that have multiple return values. Here are two very basic
examples:
.. code-block:: cpp
void increment(int &i) { i++; }
void increment_ptr(int *i) { (*i)++; }
In Python, all arguments are passed by reference, so there is no general
issue in binding such code from Python.
However, certain basic Python types (like ``str``, ``int``, ``bool``,
``float``, etc.) are **immutable**. This means that the following attempt
to port the function to Python doesn't have the same effect on the value
provided by the caller -- in fact, it does nothing at all.
.. code-block:: python
def increment(i):
i += 1 # nope..
pybind11 is also affected by such language-level conventions, which means that
binding ``increment`` or ``increment_ptr`` will also create Python functions
that don't modify their arguments.
Although inconvenient, one workaround is to encapsulate the immutable types in
a custom type that does allow modifications.
Another alternative involves binding a small wrapper lambda function that
returns a tuple with all output arguments (see the remainder of the
documentation for examples on binding lambda functions). An example:
.. code-block:: cpp
int foo(int &i) { i++; return 123; }
and the binding code
.. code-block:: cpp
m.def("foo", [](int i) { int rv = foo(i); return std::make_tuple(rv, i); });
How can I reduce the build time?
================================
It's good practice to split binding code over multiple files, as in the
following example:
:file:`example.cpp`:
.. code-block:: cpp
void init_ex1(py::module &);
void init_ex2(py::module &);
/* ... */
PYBIND11_MODULE(example, m) {
init_ex1(m);
init_ex2(m);
/* ... */
}
:file:`ex1.cpp`:
.. code-block:: cpp
void init_ex1(py::module &m) {
m.def("add", [](int a, int b) { return a + b; });
}
:file:`ex2.cpp`:
.. code-block:: cpp
void init_ex2(py::module &m) {
m.def("sub", [](int a, int b) { return a - b; });
}
:command:`python`:
.. code-block:: pycon
>>> import example
>>> example.add(1, 2)
3
>>> example.sub(1, 1)
0
As shown above, the various ``init_ex`` functions should be contained in
separate files that can be compiled independently from one another, and then
linked together into the same final shared object. Following this approach
will:
1. reduce memory requirements per compilation unit.
2. enable parallel builds (if desired).
3. allow for faster incremental builds. For instance, when a single class
definition is changed, only a subset of the binding code will generally need
to be recompiled.
"recursive template instantiation exceeded maximum depth of 256"
================================================================
If you receive an error about excessive recursive template evaluation, try
specifying a larger value, e.g. ``-ftemplate-depth=1024`` on GCC/Clang. The
culprit is generally the generation of function signatures at compile time
using C++14 template metaprogramming.
.. _`faq:hidden_visibility`:
"‘SomeClass’ declared with greater visibility than the type of its field ‘SomeClass::member’ [-Wattributes]"
============================================================================================================
This error typically indicates that you are compiling without the required
``-fvisibility`` flag. pybind11 code internally forces hidden visibility on
all internal code, but if non-hidden (and thus *exported*) code attempts to
include a pybind type (for example, ``py::object`` or ``py::list``) you can run
into this warning.
To avoid it, make sure you are specifying ``-fvisibility=hidden`` when
compiling pybind code.
As to why ``-fvisibility=hidden`` is necessary, because pybind modules could
have been compiled under different versions of pybind itself, it is also
important that the symbols defined in one module do not clash with the
potentially-incompatible symbols defined in another. While Python extension
modules are usually loaded with localized symbols (under POSIX systems
typically using ``dlopen`` with the ``RTLD_LOCAL`` flag), this Python default
can be changed, but even if it isn't it is not always enough to guarantee
complete independence of the symbols involved when not using
``-fvisibility=hidden``.
Additionally, ``-fvisibility=hidden`` can deliver considerable binary size
savings. (See the following section for more details).
.. _`faq:symhidden`:
How can I create smaller binaries?
==================================
To do its job, pybind11 extensively relies on a programming technique known as
*template metaprogramming*, which is a way of performing computation at compile
time using type information. Template metaprogramming usually instantiates code
involving significant numbers of deeply nested types that are either completely
removed or reduced to just a few instructions during the compiler's optimization
phase. However, due to the nested nature of these types, the resulting symbol
names in the compiled extension library can be extremely long. For instance,
the included test suite contains the following symbol:
.. only:: html
.. code-block:: none
__ZN8pybind1112cpp_functionC1Iv8Example2JRNSt3__16vectorINS3_12basic_stringIwNS3_11char_traitsIwEENS3_9allocatorIwEEEENS8_ISA_EEEEEJNS_4nameENS_7siblingENS_9is_methodEA28_cEEEMT0_FT_DpT1_EDpRKT2_
.. only:: not html
.. code-block:: cpp
__ZN8pybind1112cpp_functionC1Iv8Example2JRNSt3__16vectorINS3_12basic_stringIwNS3_11char_traitsIwEENS3_9allocatorIwEEEENS8_ISA_EEEEEJNS_4nameENS_7siblingENS_9is_methodEA28_cEEEMT0_FT_DpT1_EDpRKT2_
which is the mangled form of the following function type:
.. code-block:: cpp
pybind11::cpp_function::cpp_function<void, Example2, std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&, pybind11::name, pybind11::sibling, pybind11::is_method, char [28]>(void (Example2::*)(std::__1::vector<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> >, std::__1::allocator<std::__1::basic_string<wchar_t, std::__1::char_traits<wchar_t>, std::__1::allocator<wchar_t> > > >&), pybind11::name const&, pybind11::sibling const&, pybind11::is_method const&, char const (&) [28])
The memory needed to store just the mangled name of this function (196 bytes)
is larger than the actual piece of code (111 bytes) it represents! On the other
hand, it's silly to even give this function a name -- after all, it's just a
tiny cog in a bigger piece of machinery that is not exposed to the outside
world. So we'll generally only want to export symbols for those functions which
are actually called from the outside.
This can be achieved by specifying the parameter ``-fvisibility=hidden`` to GCC
and Clang, which sets the default symbol visibility to *hidden*, which has a
tremendous impact on the final binary size of the resulting extension library.
(On Visual Studio, symbols are already hidden by default, so nothing needs to
be done there.)
In addition to decreasing binary size, ``-fvisibility=hidden`` also avoids
potential serious issues when loading multiple modules and is required for
proper pybind operation. See the previous FAQ entry for more details.
Working with ancient Visual Studio 2008 builds on Windows
=========================================================
The official Windows distributions of Python are compiled using truly
ancient versions of Visual Studio that lack good C++11 support. Some users
implicitly assume that it would be impossible to load a plugin built with
Visual Studio 2015 into a Python distribution that was compiled using Visual
Studio 2008. However, no such issue exists: it's perfectly legitimate to
interface DLLs that are built with different compilers and/or C libraries.
Common gotchas to watch out for involve not ``free()``-ing memory regions
that were ``malloc()``-ed in another shared library, using data
structures with incompatible ABIs, and so on. pybind11 is very careful not
to make these types of mistakes.
How can I properly handle Ctrl-C in long-running functions?
===========================================================
Ctrl-C is received by the Python interpreter, which holds it until the GIL
is released, so a long-running function won't be interrupted.
To interrupt from inside your function, you can use the ``PyErr_CheckSignals()``
function, which will tell you whether a signal has been raised on the Python side. This
function merely checks a flag, so its impact is negligible. When a signal has
been received, you must either explicitly interrupt execution by throwing
``py::error_already_set`` (which will propagate the existing
``KeyboardInterrupt``), or clear the error (which you usually will not want):
.. code-block:: cpp
PYBIND11_MODULE(example, m)
{
m.def("long running_func", []()
{
for (;;) {
if (PyErr_CheckSignals() != 0)
throw py::error_already_set();
// Long running iteration
}
});
}
Inconsistent detection of Python version in CMake and pybind11
==============================================================
The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake
for Python version detection are not used by pybind11 due to unreliability and limitations that make
them unsuitable for pybind11's needs. Instead, pybind11 provides its own, more reliable Python detection
CMake code. Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake
Python detection in a system with several Python versions installed.
This difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. Consider the following
CMake code executed in a system with Python 2.7 and 3.x installed:
.. code-block:: cmake
find_package(PythonInterp)
find_package(PythonLibs)
find_package(pybind11)
It will detect Python 2.7, and pybind11 will pick it up as well.
In contrast this code:
.. code-block:: cmake
find_package(pybind11)
find_package(PythonInterp)
find_package(PythonLibs)
will detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards.
It is advised to avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and to rely
on pybind11 to detect the Python version. If this is not possible, the CMake Python-detection machinery should be called *before* including pybind11.
How to cite this project?
=========================
We suggest the following BibTeX template to cite pybind11 in scientific
discourse:
.. code-block:: none
@misc{pybind11,
author = {Wenzel Jakob and Jason Rhinelander and Dean Moldovan},
year = {2017},
note = {https://github.com/pybind/pybind11},
title = {pybind11 -- Seamless operability between C++11 and Python}
}
Upgrade guide
#############
This is a companion guide to the :doc:`changelog`. While the changelog briefly
lists all of the new features, improvements and bug fixes, this upgrade guide
focuses only on the subset which directly impacts your experience when upgrading
to a new version, but goes into more detail. This includes things like
deprecated APIs and their replacements, build system changes, general code
modernization and other useful information.
v2.2
====
Deprecation of the ``PYBIND11_PLUGIN`` macro
--------------------------------------------
``PYBIND11_MODULE`` is now the preferred way to create module entry points.
The old macro emits a compile-time deprecation warning.
.. code-block:: cpp
// old
PYBIND11_PLUGIN(example) {
py::module m("example", "documentation string");
m.def("add", [](int a, int b) { return a + b; });
return m.ptr();
}
// new
PYBIND11_MODULE(example, m) {
m.doc() = "documentation string"; // optional
m.def("add", [](int a, int b) { return a + b; });
}
New API for defining custom constructors and pickling functions
---------------------------------------------------------------
The old placement-new custom constructors have been deprecated. The new approach
uses ``py::init()`` and factory functions to greatly improve type safety.
Placement-new can be called accidentally with an incompatible type (without any
compiler errors or warnings), or it can initialize the same object multiple times
if not careful with the Python-side ``__init__`` calls. The new-style custom
constructors prevent such mistakes. See :ref:`custom_constructors` for details.
.. code-block:: cpp
// old -- deprecated (runtime warning shown only in debug mode)
py::class<Foo>(m, "Foo")
.def("__init__", [](Foo &self, ...) {
new (&self) Foo(...); // uses placement-new
});
// new
py::class<Foo>(m, "Foo")
.def(py::init([](...) { // Note: no `self` argument
return new Foo(...); // return by raw pointer
// or: return std::make_unique<Foo>(...); // return by holder
// or: return Foo(...); // return by value (move constructor)
}));
Mirroring the custom constructor changes, ``py::pickle()`` is now the preferred
way to get and set object state. See :ref:`pickling` for details.
.. code-block:: cpp
// old -- deprecated (runtime warning shown only in debug mode)
py::class<Foo>(m, "Foo")
...
.def("__getstate__", [](const Foo &self) {
return py::make_tuple(self.value1(), self.value2(), ...);
})
.def("__setstate__", [](Foo &self, py::tuple t) {
new (&self) Foo(t[0].cast<std::string>(), ...);
});
// new
py::class<Foo>(m, "Foo")
...
.def(py::pickle(
[](const Foo &self) { // __getstate__
return py::make_tuple(self.value1(), self.value2(), ...); // unchanged
},
[](py::tuple t) { // __setstate__, note: no `self` argument
return new Foo(t[0].cast<std::string>(), ...);
// or: return std::make_unique<Foo>(...); // return by holder
// or: return Foo(...); // return by value (move constructor)
}
));
For both the constructors and pickling, warnings are shown at module
initialization time (on import, not when the functions are called).
They're only visible when compiled in debug mode. Sample warning:
.. code-block:: none
pybind11-bound class 'mymodule.Foo' is using an old-style placement-new '__init__'
which has been deprecated. See the upgrade guide in pybind11's docs.
Stricter enforcement of hidden symbol visibility for pybind11 modules
---------------------------------------------------------------------
pybind11 now tries to actively enforce hidden symbol visibility for modules.
If you're using either one of pybind11's :doc:`CMake or Python build systems
<compiling>` (the two example repositories) and you haven't been exporting any
symbols, there's nothing to be concerned about. All the changes have been done
transparently in the background. If you were building manually or relied on
specific default visibility, read on.
Setting default symbol visibility to *hidden* has always been recommended for
pybind11 (see :ref:`faq:symhidden`). On Linux and macOS, hidden symbol
visibility (in conjunction with the ``strip`` utility) yields much smaller
module binaries. `CPython's extension docs`_ also recommend hiding symbols
by default, with the goal of avoiding symbol name clashes between modules.
Starting with v2.2, pybind11 enforces this more strictly: (1) by declaring
all symbols inside the ``pybind11`` namespace as hidden and (2) by including
the ``-fvisibility=hidden`` flag on Linux and macOS (only for extension
modules, not for embedding the interpreter).
.. _CPython's extension docs: https://docs.python.org/3/extending/extending.html#providing-a-c-api-for-an-extension-module
The namespace-scope hidden visibility is done automatically in pybind11's
headers and it's generally transparent to users. It ensures that:
* Modules compiled with different pybind11 versions don't clash with each other.
* Some new features, like ``py::module_local`` bindings, can work as intended.
The ``-fvisibility=hidden`` flag applies the same visibility to user bindings
outside of the ``pybind11`` namespace. It's now set automatically by pybind11's
CMake and Python build systems, but this needs to be done manually by users
of other build systems. Adding this flag:
* Minimizes the chances of symbol conflicts between modules. E.g. if two
unrelated modules were statically linked to different (ABI-incompatible)
versions of the same third-party library, a symbol clash would be likely
(and would end with unpredictable results).
* Produces smaller binaries on Linux and macOS, as pointed out previously.
Within pybind11's CMake build system, ``pybind11_add_module`` has always been
setting the ``-fvisibility=hidden`` flag in release mode. From now on, it's
being applied unconditionally, even in debug mode, and it can no longer be opted
out of with the ``NO_EXTRAS`` option. The ``pybind11::module`` target now also
adds this flag to its interface. The ``pybind11::embed`` target is unchanged.
The most significant change here is for the ``pybind11::module`` target. If you
were previously relying on default visibility, i.e. if your Python module was
doubling as a shared library with dependents, you'll need to either export
symbols manually (recommended for cross-platform libraries) or factor out the
shared library (and have the Python module link to it like the other
dependents). As a temporary workaround, you can also restore default visibility
using the CMake code below, but this is not recommended in the long run:
.. code-block:: cmake
target_link_libraries(mymodule PRIVATE pybind11::module)
add_library(restore_default_visibility INTERFACE)
target_compile_options(restore_default_visibility INTERFACE -fvisibility=default)
target_link_libraries(mymodule PRIVATE restore_default_visibility)
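For the manual-export route mentioned above, a minimal sketch could look as
follows (the ``MYLIB_EXPORT`` macro name is only an example, not part of
pybind11):

.. code-block:: cpp

    // Keep the module hidden overall, but explicitly export the handful of
    // symbols that dependent shared libraries actually need.
    #if defined(_WIN32)
    #  define MYLIB_EXPORT __declspec(dllexport)
    #else
    #  define MYLIB_EXPORT __attribute__((visibility("default")))
    #endif

    MYLIB_EXPORT int shared_helper(int x);  // visible to dependents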
Local STL container bindings
----------------------------
Previous pybind11 versions could only bind types globally -- all pybind11
modules, even unrelated ones, would have access to the same exported types.
However, this would also result in a conflict if two modules exported the
same C++ type, which is especially problematic for very common types, e.g.
``std::vector<int>``. :ref:`module_local` were added to resolve this (see
that section for a complete usage guide).
``py::class_`` still defaults to global bindings (because these types are
usually unique across modules), however in order to avoid clashes of opaque
types, ``py::bind_vector`` and ``py::bind_map`` will now bind STL containers
as ``py::module_local`` if their elements are: builtins (``int``, ``float``,
etc.), not bound using ``py::class_``, or bound as ``py::module_local``. For
example, this change allows multiple modules to bind ``std::vector<int>``
without causing conflicts. See :ref:`stl_bind` for more details.
When upgrading to this version, if you have multiple modules which depend on
a single global binding of an STL container, note that all modules can still
accept foreign ``py::module_local`` types in the direction of Python-to-C++.
The locality only affects the C++-to-Python direction. If this is needed in
multiple modules, you'll need to either:
* Add a copy of the same STL binding to all of the modules which need it.
* Restore the global status of that single binding by marking it
``py::module_local(false)``.
The latter is an easy workaround, but in the long run it would be best to
localize all common type bindings in order to avoid conflicts with
third-party modules.
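As a small sketch of the two options (the module and type names below are
placeholders):

.. code-block:: cpp

    #include <pybind11/pybind11.h>
    #include <pybind11/stl_bind.h>
    #include <vector>
    namespace py = pybind11;

    PYBIND11_MODULE(example, m) {
        // The element type is a builtin, so this binding is module-local by default:
        py::bind_vector<std::vector<int>>(m, "VectorInt");

        // Restore the old global behavior for a binding that must be shared:
        py::bind_vector<std::vector<float>>(m, "VectorFloat", py::module_local(false));
    }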
Negative strides for Python buffer objects and numpy arrays
-----------------------------------------------------------
Support for negative strides required changing the integer type from unsigned
to signed in the interfaces of ``py::buffer_info`` and ``py::array``. If you
have compiler warnings enabled, you may notice some new conversion warnings
after upgrading. These can be resolved using ``static_cast``.
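For example, a hand-written ``py::buffer_info`` that previously passed unsigned
values can be adjusted with explicit casts (a minimal sketch; the helper
function is hypothetical):

.. code-block:: cpp

    #include <pybind11/pybind11.h>
    #include <vector>
    namespace py = pybind11;

    py::buffer_info make_info(std::vector<double> &data) {
        return py::buffer_info(
            data.data(),                                  // pointer to the data
            sizeof(double),                               // size of one element
            py::format_descriptor<double>::format(),      // format string
            1,                                            // number of dimensions
            { static_cast<py::ssize_t>(data.size()) },    // shape (now signed)
            { static_cast<py::ssize_t>(sizeof(double)) }  // strides in bytes (now signed)
        );
    }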
Deprecation of some ``py::object`` APIs
---------------------------------------
To compare ``py::object`` instances by pointer, you should now use
``obj1.is(obj2)`` which is equivalent to ``obj1 is obj2`` in Python.
Previously, pybind11 used ``operator==`` for this (``obj1 == obj2``), but
that could be confusing and is now deprecated (so that it can eventually
be replaced with proper rich object comparison in a future release).
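A short sketch of the new identity check:

.. code-block:: cpp

    py::list a;
    py::list b = a;   // another reference to the same list
    py::list c;       // a distinct list

    bool same = a.is(b);  // true  -- same underlying Python object
    bool diff = a.is(c);  // false -- different objects, even if equal in value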
For classes which inherit from ``py::object``, ``borrowed`` and ``stolen``
were previously available as protected constructor tags. Now the types
should be used directly instead: ``borrowed_t{}`` and ``stolen_t{}``
(`#771 <https://github.com/pybind/pybind11/pull/771>`_).
Stricter compile-time error checking
------------------------------------
Some error checks have been moved from run time to compile time. Notably,
automatic conversion of ``std::shared_ptr<T>`` is not possible when ``T`` is
not directly registered with ``py::class_<T>`` (e.g. ``std::shared_ptr<int>``
or ``std::shared_ptr<std::vector<T>>`` are not automatically convertible).
Attempting to bind a function with such arguments now results in a compile-time
error instead of waiting to fail at run time.
``py::init<...>()`` constructor definitions are also stricter and now prevent
bindings which could cause unexpected behavior:
.. code-block:: cpp
struct Example {
Example(int &);
};
py::class_<Example>(m, "Example")
.def(py::init<int &>()); // OK, exact match
// .def(py::init<int>()); // compile-time error, mismatch
A non-``const`` lvalue reference is not allowed to bind to an rvalue. However,
note that a constructor taking ``const T &`` can still be registered using
``py::init<T>()`` because a ``const`` lvalue reference can bind to an rvalue.
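To illustrate the last point with a hypothetical type:

.. code-block:: cpp

    struct Config {
        Config(const int &);   // constructor taking a const lvalue reference
    };

    py::class_<Config>(m, "Config")
        .def(py::init<int>()); // OK: the const reference can bind to the rvalue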
v2.1
====
Minimum compiler versions are enforced at compile time
------------------------------------------------------
The minimums also apply to v2.0 but the check is now explicit and a compile-time
error is raised if the compiler does not meet the requirements:
* GCC >= 4.8
* clang >= 3.3 (appleclang >= 5.0)
* MSVC >= 2015u3
* Intel C++ >= 15.0
The ``py::metaclass`` attribute is not required for static properties
---------------------------------------------------------------------
Binding classes with static properties is now possible by default. The
zero-parameter version of ``py::metaclass()`` is deprecated. However, a new
one-parameter ``py::metaclass(python_type)`` version was added for rare
cases when a custom metaclass is needed to override pybind11's default.
.. code-block:: cpp
// old -- emits a deprecation warning
py::class_<Foo>(m, "Foo", py::metaclass())
.def_property_readonly_static("foo", ...);
// new -- static properties work without the attribute
py::class_<Foo>(m, "Foo")
.def_property_readonly_static("foo", ...);
// new -- advanced feature, override pybind11's default metaclass
py::class_<Bar>(m, "Bar", py::metaclass(custom_python_type))
...
v2.0
====
Breaking changes in ``py::class_``
----------------------------------
These changes were necessary to make type definitions in pybind11
future-proof, to support PyPy via its ``cpyext`` mechanism (`#527
<https://github.com/pybind/pybind11/pull/527>`_), and to improve efficiency
(`rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_).
1. Declarations of types that provide access via the buffer protocol must
now include the ``py::buffer_protocol()`` annotation as an argument to
the ``py::class_`` constructor.
.. code-block:: cpp
py::class_<Matrix>("Matrix", py::buffer_protocol())
.def(py::init<...>())
.def_buffer(...);
2. Classes which include static properties (e.g. ``def_readwrite_static()``)
must now include the ``py::metaclass()`` attribute. Note: this requirement
has since been removed in v2.1. If you're upgrading from 1.x, it's
recommended to skip directly to v2.1 or newer.
3. This version of pybind11 uses a redesigned mechanism for instantiating
trampoline classes that are used to override virtual methods from within
Python. This led to the following user-visible syntax change:
.. code-block:: cpp
// old v1.x syntax
py::class_<TrampolineClass>("MyClass")
.alias<MyClass>()
...
// new v2.x syntax
py::class_<MyClass, TrampolineClass>("MyClass")
...
Importantly, both the original and the trampoline class are now specified
as arguments to the ``py::class_`` template, and the ``alias<..>()`` call
is gone. The new scheme has zero overhead in cases when Python doesn't
override any functions of the underlying C++ class.
`rev. 86d825 <https://github.com/pybind/pybind11/commit/86d825>`_.
The class type must be the first template argument given to ``py::class_``
while the trampoline can be mixed in arbitrary order with other arguments
(see the following section).
Deprecation of the ``py::base<T>()`` attribute
----------------------------------------------
``py::base<T>()`` was deprecated in favor of specifying ``T`` as a template
argument to ``py::class_``. This new syntax also supports multiple inheritance.
Note that, while the type being exported must be the first argument in the
``py::class_<Class, ...>`` template, the order of the following types (bases,
holder and/or trampoline) is not important.
.. code-block:: cpp
// old v1.x
py::class_<Derived>("Derived", py::base<Base>());
// new v2.x
py::class_<Derived, Base>("Derived");
// new -- multiple inheritance
py::class_<Derived, Base1, Base2>("Derived");
// new -- apart from `Derived` the argument order can be arbitrary
py::class_<Derived, Base1, Holder, Base2, Trampoline>("Derived");
Out-of-the-box support for ``std::shared_ptr``
----------------------------------------------
The relevant type caster is now built in, so it's no longer necessary to
include a declaration of the form:
.. code-block:: cpp
PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)
Continuing to do so won’t cause an error or even a deprecation warning,
but it's completely redundant.
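For example, the following now works out of the box (``Widget`` is just a
placeholder name):

.. code-block:: cpp

    #include <memory>

    struct Widget { };

    py::class_<Widget, std::shared_ptr<Widget>>(m, "Widget")
        .def(py::init<>());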
Deprecation of a few ``py::object`` APIs
----------------------------------------
All of the old-style calls emit deprecation warnings.
+---------------------------------------+---------------------------------------------+
| Old syntax | New syntax |
+=======================================+=============================================+
| ``obj.call(args...)`` | ``obj(args...)`` |
+---------------------------------------+---------------------------------------------+
| ``obj.str()`` | ``py::str(obj)`` |
+---------------------------------------+---------------------------------------------+
| ``auto l = py::list(obj); l.check()`` | ``py::isinstance<py::list>(obj)`` |
+---------------------------------------+---------------------------------------------+
| ``py::object(ptr, true)`` | ``py::reinterpret_borrow<py::object>(ptr)`` |
+---------------------------------------+---------------------------------------------+
| ``py::object(ptr, false)`` | ``py::reinterpret_steal<py::object>(ptr)`` |
+---------------------------------------+---------------------------------------------+
| ``if (obj.attr("foo"))`` | ``if (py::hasattr(obj, "foo"))`` |
+---------------------------------------+---------------------------------------------+
| ``if (obj["bar"])`` | ``if (obj.contains("bar"))`` |
+---------------------------------------+---------------------------------------------+
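A short sketch using a few of the replacements from the table above (the raw
``PyObject *`` is only there for illustration):

.. code-block:: cpp

    PyObject *raw = PyLong_FromLong(42);                      // new reference
    py::object obj = py::reinterpret_steal<py::object>(raw);  // take ownership

    if (py::isinstance<py::int_>(obj) && py::hasattr(obj, "__add__"))
        py::print(py::str(obj));                              // prints "42"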
Limitations
###########
pybind11 strives to be a general solution to binding generation, but it also has
certain limitations:
- pybind11 casts away ``const``-ness in function arguments and return values.
This is in line with the Python language, which has no concept of ``const``
values. This means that some additional care is needed to avoid bugs that
would be caught by the type checker in a traditional C++ program.
- The NumPy interface ``pybind11::array`` greatly simplifies accessing
numerical data from C++ (and vice versa), but it's not a full-blown array
class like ``Eigen::Array`` or ``boost.multi_array``.
These features could be implemented but would lead to a significant increase in
complexity. I've decided to draw the line here to keep this project simple and
compact. Users who absolutely require these features are encouraged to fork
pybind11.
.. only:: not latex
.. image:: pybind11-logo.png
pybind11 --- Seamless operability between C++11 and Python
==========================================================
.. only:: not latex
Contents:
.. toctree::
:maxdepth: 1
intro
changelog
upgrade
.. toctree::
:caption: The Basics
:maxdepth: 2
basics
classes
compiling
.. toctree::
:caption: Advanced Topics
:maxdepth: 2
advanced/functions
advanced/classes
advanced/exceptions
advanced/smart_ptrs
advanced/cast/index
advanced/pycpp/index
advanced/embedding
advanced/misc
.. toctree::
:caption: Extra Information
:maxdepth: 1
faq
benchmark
limitations
reference
To release a new version of pybind11:
- Update the version number and push to PyPI
- Update ``pybind11/_version.py`` (set release version, remove 'dev').
- Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``.
- Ensure that all the information in ``setup.py`` is up-to-date.
- Update version in ``docs/conf.py``.
- Tag release date in ``docs/changelog.rst``.
- ``git add`` and ``git commit``.
- if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
- ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
- ``git push``
- ``git push --tags``.
- ``python setup.py sdist upload``.
- ``python setup.py bdist_wheel upload``.
- Get back to work
- Update ``_version.py`` (add 'dev' and increment minor).
- Update version in ``docs/conf.py``
- Update version macros in ``include/pybind11/common.h``
- ``git add`` and ``git commit``.
- ``git push``
.. _changelog:
Changelog
#########
Starting with version 1.8.0, pybind11 releases use a `semantic versioning
<http://semver.org>`_ policy.
v2.5.0 (Mar 31, 2020)
-----------------------------------------------------
* Use C++17 fold expressions in type casters, if available. This can
improve performance during overload resolution when functions have
multiple arguments.
`#2043 <https://github.com/pybind/pybind11/pull/2043>`_.
* Changed include directory resolution in ``pybind11/__init__.py``
and installation in ``setup.py``. This fixes a number of open issues
where pybind11 headers could not be found in certain environments.
`#1995 <https://github.com/pybind/pybind11/pull/1995>`_.
* C++20 ``char8_t`` and ``u8string`` support. `#2026
<https://github.com/pybind/pybind11/pull/2026>`_.
* CMake: search for Python 3.9. `bb9c91
<https://github.com/pybind/pybind11/commit/bb9c91>`_.
* Fixes for MSYS-based build environments.
`#2087 <https://github.com/pybind/pybind11/pull/2087>`_,
`#2053 <https://github.com/pybind/pybind11/pull/2053>`_.
* STL bindings for ``std::vector<...>::clear``. `#2074
<https://github.com/pybind/pybind11/pull/2074>`_.
* Read-only flag for ``py::buffer``. `#1466
<https://github.com/pybind/pybind11/pull/1466>`_.
* Exception handling during module initialization.
`bf2b031 <https://github.com/pybind/pybind11/commit/bf2b031>`_.
* Support linking against a CPython debug build.
`#2025 <https://github.com/pybind/pybind11/pull/2025>`_.
* Fixed issues involving the availability and use of aligned ``new`` and
``delete``. `#1988 <https://github.com/pybind/pybind11/pull/1988>`_,
`759221 <https://github.com/pybind/pybind11/commit/759221>`_.
* Fixed a resource leak upon interpreter shutdown.
`#2020 <https://github.com/pybind/pybind11/pull/2020>`_.
* Fixed error handling in the boolean caster.
`#1976 <https://github.com/pybind/pybind11/pull/1976>`_.
v2.4.3 (Oct 15, 2019)
-----------------------------------------------------
* Adapt pybind11 to a C API convention change in Python 3.8. `#1950
<https://github.com/pybind/pybind11/pull/1950>`_.
v2.4.2 (Sep 21, 2019)
-----------------------------------------------------
* Replaced usage of a C++14 only construct. `#1929
<https://github.com/pybind/pybind11/pull/1929>`_.
* Made an ifdef future-proof for Python >= 4. `f3109d
<https://github.com/pybind/pybind11/commit/f3109d>`_.
v2.4.1 (Sep 20, 2019)
-----------------------------------------------------
* Fixed a problem involving implicit conversion from enumerations to integers
on Python 3.8. `#1780 <https://github.com/pybind/pybind11/pull/1780>`_.
v2.4.0 (Sep 19, 2019)
-----------------------------------------------------
* Try harder to keep pybind11-internal data structures separate when there
are potential ABI incompatibilities. Fixes crashes that occurred when loading
multiple pybind11 extensions that were e.g. compiled by GCC (libstdc++)
and Clang (libc++).
`#1588 <https://github.com/pybind/pybind11/pull/1588>`_ and
`c9f5a <https://github.com/pybind/pybind11/commit/c9f5a>`_.
* Added support for ``__await__``, ``__aiter__``, and ``__anext__`` protocols.
`#1842 <https://github.com/pybind/pybind11/pull/1842>`_.
* ``pybind11_add_module()``: don't strip symbols when compiling in
``RelWithDebInfo`` mode. `#1980
<https://github.com/pybind/pybind11/pull/1980>`_.
* ``enum_``: Reproduce Python behavior when comparing against invalid values
(e.g. ``None``, strings, etc.). Add back support for ``__invert__()``.
`#1912 <https://github.com/pybind/pybind11/pull/1912>`_,
`#1907 <https://github.com/pybind/pybind11/pull/1907>`_.
* List insertion operation for ``py::list``.
Added ``.empty()`` to all collection types.
Added ``py::set::contains()`` and ``py::dict::contains()``.
`#1887 <https://github.com/pybind/pybind11/pull/1887>`_,
`#1884 <https://github.com/pybind/pybind11/pull/1884>`_,
`#1888 <https://github.com/pybind/pybind11/pull/1888>`_.
* ``py::details::overload_cast_impl`` is available in C++11 mode, can be used
like ``overload_cast`` with an additional set of parentheses.
`#1581 <https://github.com/pybind/pybind11/pull/1581>`_.
* Fixed ``get_include()`` on Conda.
`#1877 <https://github.com/pybind/pybind11/pull/1877>`_.
* ``stl_bind.h``: negative indexing support.
`#1882 <https://github.com/pybind/pybind11/pull/1882>`_.
* Minor CMake fix to add MinGW compatibility.
`#1851 <https://github.com/pybind/pybind11/pull/1851>`_.
* GIL-related fixes.
`#1836 <https://github.com/pybind/pybind11/pull/1836>`_,
`8b90b <https://github.com/pybind/pybind11/commit/8b90b>`_.
* Other very minor/subtle fixes and improvements.
`#1329 <https://github.com/pybind/pybind11/pull/1329>`_,
`#1910 <https://github.com/pybind/pybind11/pull/1910>`_,
`#1863 <https://github.com/pybind/pybind11/pull/1863>`_,
`#1847 <https://github.com/pybind/pybind11/pull/1847>`_,
`#1890 <https://github.com/pybind/pybind11/pull/1890>`_,
`#1860 <https://github.com/pybind/pybind11/pull/1860>`_,
`#1848 <https://github.com/pybind/pybind11/pull/1848>`_,
`#1821 <https://github.com/pybind/pybind11/pull/1821>`_,
`#1837 <https://github.com/pybind/pybind11/pull/1837>`_,
`#1833 <https://github.com/pybind/pybind11/pull/1833>`_,
`#1748 <https://github.com/pybind/pybind11/pull/1748>`_,
`#1852 <https://github.com/pybind/pybind11/pull/1852>`_.
v2.3.0 (June 11, 2019)
-----------------------------------------------------
* Significantly reduced module binary size (10-20%) when compiled in C++11 mode
with GCC/Clang, or in any mode with MSVC. Function signatures are now always
precomputed at compile time (this was previously only available in C++14 mode
for non-MSVC compilers).
`#934 <https://github.com/pybind/pybind11/pull/934>`_.
* Add basic support for tag-based static polymorphism, where classes
provide a method to return the desired type of an instance.
`#1326 <https://github.com/pybind/pybind11/pull/1326>`_.
* Python type wrappers (``py::handle``, ``py::object``, etc.)
now map Python's number protocol onto C++ arithmetic
operators such as ``operator+``, ``operator/=``, etc.
`#1511 <https://github.com/pybind/pybind11/pull/1511>`_.
* A number of improvements related to enumerations:
1. The ``enum_`` implementation was rewritten from scratch to reduce
code bloat. Rather than instantiating a full implementation for each
enumeration, most code is now contained in a generic base class.
`#1511 <https://github.com/pybind/pybind11/pull/1511>`_.
2. The ``value()`` method of ``py::enum_`` now accepts an optional
docstring that will be shown in the documentation of the associated
enumeration. `#1160 <https://github.com/pybind/pybind11/pull/1160>`_.
3. Check for an already existing enum value and throw an error if one is present.
`#1453 <https://github.com/pybind/pybind11/pull/1453>`_.
* Support for over-aligned type allocation via C++17's aligned ``new``
statement. `#1582 <https://github.com/pybind/pybind11/pull/1582>`_.
* Added ``py::ellipsis()`` method for slicing of multidimensional NumPy arrays.
`#1502 <https://github.com/pybind/pybind11/pull/1502>`_.
* Numerous improvements to the ``mkdoc.py`` script for extracting documentation
from C++ header files.
`#1788 <https://github.com/pybind/pybind11/pull/1788>`_.
* ``pybind11_add_module()``: allow including Python as a ``SYSTEM`` include path.
`#1416 <https://github.com/pybind/pybind11/pull/1416>`_.
* ``pybind11/stl.h`` does not convert strings to ``vector<string>`` anymore.
`#1258 <https://github.com/pybind/pybind11/issues/1258>`_.
* Mark static methods as such to fix auto-generated Sphinx documentation.
`#1732 <https://github.com/pybind/pybind11/pull/1732>`_.
* Re-throw forced unwind exceptions (e.g. during pthread termination).
`#1208 <https://github.com/pybind/pybind11/pull/1208>`_.
* Added ``__contains__`` method to the bindings of maps (``std::map``,
``std::unordered_map``).
`#1767 <https://github.com/pybind/pybind11/pull/1767>`_.
* Improvements to ``gil_scoped_acquire``.
`#1211 <https://github.com/pybind/pybind11/pull/1211>`_.
* Type caster support for ``std::deque<T>``.
`#1609 <https://github.com/pybind/pybind11/pull/1609>`_.
* Support for ``std::unique_ptr`` holders, whose deleters differ between a base and derived
class. `#1353 <https://github.com/pybind/pybind11/pull/1353>`_.
* Construction of STL array/vector-like data structures from
iterators. Added an ``extend()`` operation.
`#1709 <https://github.com/pybind/pybind11/pull/1709>`_,
* CMake build system improvements for projects that include non-C++
files (e.g. plain C, CUDA) in ``pybind11_add_module`` et al.
`#1678 <https://github.com/pybind/pybind11/pull/1678>`_.
* Fixed asynchronous invocation and deallocation of Python functions
wrapped in ``std::function``.
`#1595 <https://github.com/pybind/pybind11/pull/1595>`_.
* Fixes regarding return value policy propagation in STL type casters.
`#1603 <https://github.com/pybind/pybind11/pull/1603>`_.
* Fixed scoped enum comparisons.
`#1571 <https://github.com/pybind/pybind11/pull/1571>`_.
* Fixed iostream redirection for code that releases the GIL.
`#1368 <https://github.com/pybind/pybind11/pull/1368>`_,
* A number of CI-related fixes.
`#1757 <https://github.com/pybind/pybind11/pull/1757>`_,
`#1744 <https://github.com/pybind/pybind11/pull/1744>`_,
`#1670 <https://github.com/pybind/pybind11/pull/1670>`_.
v2.2.4 (September 11, 2018)
-----------------------------------------------------
* Use new Python 3.7 Thread Specific Storage (TSS) implementation if available.
`#1454 <https://github.com/pybind/pybind11/pull/1454>`_,
`#1517 <https://github.com/pybind/pybind11/pull/1517>`_.
* Fixes for newer MSVC versions and C++17 mode.
`#1347 <https://github.com/pybind/pybind11/pull/1347>`_,
`#1462 <https://github.com/pybind/pybind11/pull/1462>`_.
* Propagate return value policies to type-specific casters
when casting STL containers.
`#1455 <https://github.com/pybind/pybind11/pull/1455>`_.
* Allow ostream-redirection of more than 1024 characters.
`#1479 <https://github.com/pybind/pybind11/pull/1479>`_.
* Set ``Py_DEBUG`` define when compiling against a debug Python build.
`#1438 <https://github.com/pybind/pybind11/pull/1438>`_.
* Untangle integer logic in number type caster to work for custom
types that may only be castable to a restricted set of builtin types.
`#1442 <https://github.com/pybind/pybind11/pull/1442>`_.
* CMake build system: Remember Python version in cache file.
`#1434 <https://github.com/pybind/pybind11/pull/1434>`_.
* Fix for custom smart pointers: use ``std::addressof`` to obtain holder
address instead of ``operator&``.
`#1435 <https://github.com/pybind/pybind11/pull/1435>`_.
* Properly report exceptions thrown during module initialization.
`#1362 <https://github.com/pybind/pybind11/pull/1362>`_.
* Fixed a segmentation fault when creating empty-shaped NumPy array.
`#1371 <https://github.com/pybind/pybind11/pull/1371>`_.
* The version of Intel C++ compiler must be >= 2017, and this is now checked by
the header files. `#1363 <https://github.com/pybind/pybind11/pull/1363>`_.
* A few minor typo fixes and improvements to the test suite, and
patches that silence compiler warnings.
* Vectors now support construction from generators, as well as ``extend()`` from a
list or generator.
`#1496 <https://github.com/pybind/pybind11/pull/1496>`_.
v2.2.3 (April 29, 2018)
-----------------------------------------------------
* The pybind11 header location detection was replaced by a new implementation
that no longer depends on ``pip`` internals (the recently released ``pip``
10 has restricted access to this API).
`#1190 <https://github.com/pybind/pybind11/pull/1190>`_.
* Small adjustment to an implementation detail to work around a compiler segmentation fault in Clang 3.3/3.4.
`#1350 <https://github.com/pybind/pybind11/pull/1350>`_.
* The minimal supported version of the Intel compiler was >= 17.0 since
pybind11 v2.1. This check is now explicit, and a compile-time error is raised
if the compiler does not meet the requirement.
`#1363 <https://github.com/pybind/pybind11/pull/1363>`_.
* Fixed an endianness-related fault in the test suite.
`#1287 <https://github.com/pybind/pybind11/pull/1287>`_.
v2.2.2 (February 7, 2018)
-----------------------------------------------------
* Fixed a segfault when combining embedded interpreter
shutdown/reinitialization with external loaded pybind11 modules.
`#1092 <https://github.com/pybind/pybind11/pull/1092>`_.
* Eigen support: fixed a bug where Nx1/1xN numpy inputs couldn't be passed as
arguments to Eigen vectors (which for Eigen are simply compile-time fixed
Nx1/1xN matrices).
`#1106 <https://github.com/pybind/pybind11/pull/1106>`_.
* Clarified the license by moving the licensing of contributions from
``LICENSE`` into ``CONTRIBUTING.md``: the licensing of contributions is not
actually part of the software license as distributed. This isn't meant to be
a substantial change in the licensing of the project, but addresses concerns
that the clause made the license non-standard.
`#1109 <https://github.com/pybind/pybind11/issues/1109>`_.
* Fixed a regression introduced in 2.1 that broke binding functions with lvalue
character literal arguments.
`#1128 <https://github.com/pybind/pybind11/pull/1128>`_.
* MSVC: fix for compilation failures under /permissive-, and added the flag to
the appveyor test suite.
`#1155 <https://github.com/pybind/pybind11/pull/1155>`_.
* Fixed ``__qualname__`` generation, and in turn, fixes how class names
(especially nested class names) are shown in generated docstrings.
`#1171 <https://github.com/pybind/pybind11/pull/1171>`_.
* Updated the FAQ with a suggested project citation reference.
`#1189 <https://github.com/pybind/pybind11/pull/1189>`_.
* Added fixes for deprecation warnings when compiled under C++17 with
``-Wdeprecated`` turned on, and add ``-Wdeprecated`` to the test suite
compilation flags.
`#1191 <https://github.com/pybind/pybind11/pull/1191>`_.
* Fixed outdated PyPI URLs in ``setup.py``.
`#1213 <https://github.com/pybind/pybind11/pull/1213>`_.
* Fixed a refcount leak for arguments that end up in a ``py::args`` argument
for functions with both fixed positional and ``py::args`` arguments.
`#1216 <https://github.com/pybind/pybind11/pull/1216>`_.
* Fixed a potential segfault resulting from possible premature destruction of
``py::args``/``py::kwargs`` arguments with overloaded functions.
`#1223 <https://github.com/pybind/pybind11/pull/1223>`_.
* Fixed ``del map[item]`` for a ``stl_bind.h`` bound stl map.
`#1229 <https://github.com/pybind/pybind11/pull/1229>`_.
* Fixed a regression from v2.1.x where the aggregate initialization could
unintentionally end up at a constructor taking a templated
``std::initializer_list<T>`` argument.
`#1249 <https://github.com/pybind/pybind11/pull/1249>`_.
* Fixed an issue where calling a function with a keep_alive policy on the same
nurse/patient pair would cause the internal patient storage to needlessly
grow (unboundedly, if the nurse is long-lived).
`#1251 <https://github.com/pybind/pybind11/issues/1251>`_.
* Various other minor fixes.
v2.2.1 (September 14, 2017)
-----------------------------------------------------
* Added ``py::module::reload()`` member function for reloading a module.
`#1040 <https://github.com/pybind/pybind11/pull/1040>`_.
* Fixed a reference leak in the number converter.
`#1078 <https://github.com/pybind/pybind11/pull/1078>`_.
* Fixed compilation with Clang on host GCC < 5 (old libstdc++ which isn't fully
C++11 compliant). `#1062 <https://github.com/pybind/pybind11/pull/1062>`_.
* Fixed a regression where the automatic ``std::vector<bool>`` caster would
fail to compile. The same fix also applies to any container which returns
element proxies instead of references.
`#1053 <https://github.com/pybind/pybind11/pull/1053>`_.
* Fixed a regression where the ``py::keep_alive`` policy could not be applied
to constructors. `#1065 <https://github.com/pybind/pybind11/pull/1065>`_.
* Fixed a nullptr dereference when loading a ``py::module_local`` type
that's only registered in an external module.
`#1058 <https://github.com/pybind/pybind11/pull/1058>`_.
* Fixed implicit conversion of accessors to types derived from ``py::object``.
`#1076 <https://github.com/pybind/pybind11/pull/1076>`_.
* The ``name`` in ``PYBIND11_MODULE(name, variable)`` can now be a macro.
`#1082 <https://github.com/pybind/pybind11/pull/1082>`_.
* Relaxed overly strict ``py::pickle()`` check for matching get and set types.
`#1064 <https://github.com/pybind/pybind11/pull/1064>`_.
* Conversion errors now try to be more informative when it's likely that
a missing header is the cause (e.g. forgetting ``<pybind11/stl.h>``).
`#1077 <https://github.com/pybind/pybind11/pull/1077>`_.
v2.2.0 (August 31, 2017)
-----------------------------------------------------
* Support for embedding the Python interpreter. See the
:doc:`documentation page </advanced/embedding>` for a
full overview of the new features.
`#774 <https://github.com/pybind/pybind11/pull/774>`_,
`#889 <https://github.com/pybind/pybind11/pull/889>`_,
`#892 <https://github.com/pybind/pybind11/pull/892>`_,
`#920 <https://github.com/pybind/pybind11/pull/920>`_.
.. code-block:: cpp
#include <pybind11/embed.h>
namespace py = pybind11;
int main() {
py::scoped_interpreter guard{}; // start the interpreter and keep it alive
py::print("Hello, World!"); // use the Python API
}
* Support for inheriting from multiple C++ bases in Python.
`#693 <https://github.com/pybind/pybind11/pull/693>`_.
.. code-block:: python
from cpp_module import CppBase1, CppBase2
class PyDerived(CppBase1, CppBase2):
def __init__(self):
CppBase1.__init__(self) # C++ bases must be initialized explicitly
CppBase2.__init__(self)
* ``PYBIND11_MODULE`` is now the preferred way to create module entry points.
``PYBIND11_PLUGIN`` is deprecated. See :ref:`macros` for details.
`#879 <https://github.com/pybind/pybind11/pull/879>`_.
.. code-block:: cpp
// new
PYBIND11_MODULE(example, m) {
m.def("add", [](int a, int b) { return a + b; });
}
// old
PYBIND11_PLUGIN(example) {
py::module m("example");
m.def("add", [](int a, int b) { return a + b; });
return m.ptr();
}
* pybind11's headers and build system now more strictly enforce hidden symbol
visibility for extension modules. This should be seamless for most users,
but see the :doc:`upgrade` if you use a custom build system.
`#995 <https://github.com/pybind/pybind11/pull/995>`_.
* Support for ``py::module_local`` types which allow multiple modules to
export the same C++ types without conflicts. This is useful for opaque
types like ``std::vector<int>``. ``py::bind_vector`` and ``py::bind_map``
now default to ``py::module_local`` if their elements are builtins or
local types. See :ref:`module_local` for details.
`#949 <https://github.com/pybind/pybind11/pull/949>`_,
`#981 <https://github.com/pybind/pybind11/pull/981>`_,
`#995 <https://github.com/pybind/pybind11/pull/995>`_,
`#997 <https://github.com/pybind/pybind11/pull/997>`_.
* Custom constructors can now be added very easily using lambdas or factory
functions which return a class instance by value, pointer or holder. This
supersedes the old placement-new ``__init__`` technique.
See :ref:`custom_constructors` for details.
`#805 <https://github.com/pybind/pybind11/pull/805>`_,
`#1014 <https://github.com/pybind/pybind11/pull/1014>`_.
.. code-block:: cpp
struct Example {
Example(std::string);
};
py::class_<Example>(m, "Example")
.def(py::init<std::string>()) // existing constructor
.def(py::init([](int n) { // custom constructor
return std::make_unique<Example>(std::to_string(n));
}));
* Similarly to custom constructors, pickling support functions are now bound
using the ``py::pickle()`` adaptor which improves type safety. See the
:doc:`upgrade` and :ref:`pickling` for details.
`#1038 <https://github.com/pybind/pybind11/pull/1038>`_.
* Builtin support for converting C++17 standard library types and general
conversion improvements:
1. C++17 ``std::variant`` is supported right out of the box. C++11/14
equivalents (e.g. ``boost::variant``) can also be added with a simple
user-defined specialization. See :ref:`cpp17_container_casters` for details.
`#811 <https://github.com/pybind/pybind11/pull/811>`_,
`#845 <https://github.com/pybind/pybind11/pull/845>`_,
`#989 <https://github.com/pybind/pybind11/pull/989>`_.
2. Out-of-the-box support for C++17 ``std::string_view``.
`#906 <https://github.com/pybind/pybind11/pull/906>`_.
3. Improved compatibility of the builtin ``optional`` converter.
`#874 <https://github.com/pybind/pybind11/pull/874>`_.
4. The ``bool`` converter now accepts ``numpy.bool_`` and types which
define ``__bool__`` (Python 3.x) or ``__nonzero__`` (Python 2.7).
`#925 <https://github.com/pybind/pybind11/pull/925>`_.
5. C++-to-Python casters are now more efficient and move elements out
of rvalue containers whenever possible.
`#851 <https://github.com/pybind/pybind11/pull/851>`_,
`#936 <https://github.com/pybind/pybind11/pull/936>`_,
`#938 <https://github.com/pybind/pybind11/pull/938>`_.
6. Fixed ``bytes`` to ``std::string/char*`` conversion on Python 3.
`#817 <https://github.com/pybind/pybind11/pull/817>`_.
7. Fixed lifetime of temporary C++ objects created in Python-to-C++ conversions.
`#924 <https://github.com/pybind/pybind11/pull/924>`_.
* Scope guard call policy for RAII types, e.g. ``py::call_guard<py::gil_scoped_release>()``,
``py::call_guard<py::scoped_ostream_redirect>()``. See :ref:`call_policies` for details.
`#740 <https://github.com/pybind/pybind11/pull/740>`_.
* Utility for redirecting C++ streams to Python (e.g. ``std::cout`` ->
``sys.stdout``). Scope guard ``py::scoped_ostream_redirect`` in C++ and
a context manager in Python. See :ref:`ostream_redirect`.
`#1009 <https://github.com/pybind/pybind11/pull/1009>`_.
* Improved handling of types and exceptions across module boundaries.
`#915 <https://github.com/pybind/pybind11/pull/915>`_,
`#951 <https://github.com/pybind/pybind11/pull/951>`_,
`#995 <https://github.com/pybind/pybind11/pull/995>`_.
* Fixed destruction order of ``py::keep_alive`` nurse/patient objects
in reference cycles.
`#856 <https://github.com/pybind/pybind11/pull/856>`_.
* Numpy and buffer protocol related improvements:
1. Support for negative strides in Python buffer objects/numpy arrays. This
required changing integers from unsigned to signed for the related C++ APIs.
Note: If you have compiler warnings enabled, you may notice some new conversion
warnings after upgrading. These can be resolved with ``static_cast``.
`#782 <https://github.com/pybind/pybind11/pull/782>`_.
2. Support ``std::complex`` and arrays inside ``PYBIND11_NUMPY_DTYPE``.
`#831 <https://github.com/pybind/pybind11/pull/831>`_,
`#832 <https://github.com/pybind/pybind11/pull/832>`_.
3. Support for constructing ``py::buffer_info`` and ``py::arrays`` using
arbitrary containers or iterators instead of requiring a ``std::vector``.
`#788 <https://github.com/pybind/pybind11/pull/788>`_,
`#822 <https://github.com/pybind/pybind11/pull/822>`_,
`#860 <https://github.com/pybind/pybind11/pull/860>`_.
4. Explicitly check numpy version and require >= 1.7.0.
`#819 <https://github.com/pybind/pybind11/pull/819>`_.
* Support for allowing/prohibiting ``None`` for specific arguments and improved
``None`` overload resolution order. See :ref:`none_arguments` for details.
`#843 <https://github.com/pybind/pybind11/pull/843>`_.
`#859 <https://github.com/pybind/pybind11/pull/859>`_.
* Added ``py::exec()`` as a shortcut for ``py::eval<py::eval_statements>()``
and support for C++11 raw string literals as input. See :ref:`eval`.
`#766 <https://github.com/pybind/pybind11/pull/766>`_,
`#827 <https://github.com/pybind/pybind11/pull/827>`_.
* ``py::vectorize()`` ignores non-vectorizable arguments and supports
member functions.
`#762 <https://github.com/pybind/pybind11/pull/762>`_.
* Support for bound methods as callbacks (``pybind11/functional.h``).
`#815 <https://github.com/pybind/pybind11/pull/815>`_.
* Allow aliasing pybind11 methods: ``cls.attr("foo") = cls.attr("bar")``.
`#802 <https://github.com/pybind/pybind11/pull/802>`_.
* Don't allow mixed static/non-static overloads.
`#804 <https://github.com/pybind/pybind11/pull/804>`_.
* Fixed overriding static properties in derived classes.
`#784 <https://github.com/pybind/pybind11/pull/784>`_.
* Added support for write only properties.
`#1144 <https://github.com/pybind/pybind11/pull/1144>`_.
* Improved deduction of member functions of a derived class when its bases
aren't registered with pybind11.
`#855 <https://github.com/pybind/pybind11/pull/855>`_.
.. code-block:: cpp
struct Base {
int foo() { return 42; }
}
struct Derived : Base {}
// Now works, but previously required also binding `Base`
py::class_<Derived>(m, "Derived")
.def("foo", &Derived::foo); // function is actually from `Base`
* The implementation of ``py::init<>`` now uses C++11 brace initialization
syntax to construct instances, which permits binding implicit constructors of
aggregate types. `#1015 <https://github.com/pybind/pybind11/pull/1015>`_.
.. code-block:: cpp
struct Aggregate {
int a;
std::string b;
};
py::class_<Aggregate>(m, "Aggregate")
.def(py::init<int, const std::string &>());
* Fixed issues with multiple inheritance with offset base/derived pointers.
`#812 <https://github.com/pybind/pybind11/pull/812>`_,
`#866 <https://github.com/pybind/pybind11/pull/866>`_,
`#960 <https://github.com/pybind/pybind11/pull/960>`_.
* Fixed reference leak of type objects.
`#1030 <https://github.com/pybind/pybind11/pull/1030>`_.
* Improved support for the ``/std:c++14`` and ``/std:c++latest`` modes
on MSVC 2017.
`#841 <https://github.com/pybind/pybind11/pull/841>`_,
`#999 <https://github.com/pybind/pybind11/pull/999>`_.
* Fixed detection of private operator new on MSVC.
`#893 <https://github.com/pybind/pybind11/pull/893>`_,
`#918 <https://github.com/pybind/pybind11/pull/918>`_.
* Intel C++ compiler compatibility fixes.
`#937 <https://github.com/pybind/pybind11/pull/937>`_.
* Fixed implicit conversion of `py::enum_` to integer types on Python 2.7.
`#821 <https://github.com/pybind/pybind11/pull/821>`_.
* Added ``py::hash`` to fetch the hash value of Python objects, and
``.def(hash(py::self))`` to provide the C++ ``std::hash`` as the Python
``__hash__`` method.
`#1034 <https://github.com/pybind/pybind11/pull/1034>`_.
* Fixed ``__truediv__`` on Python 2 and ``__itruediv__`` on Python 3.
`#867 <https://github.com/pybind/pybind11/pull/867>`_.
* ``py::capsule`` objects now support the ``name`` attribute. This is useful
for interfacing with ``scipy.LowLevelCallable``.
`#902 <https://github.com/pybind/pybind11/pull/902>`_.
* Fixed ``py::make_iterator``'s ``__next__()`` for past-the-end calls.
`#897 <https://github.com/pybind/pybind11/pull/897>`_.
* Added ``error_already_set::matches()`` for checking Python exceptions.
`#772 <https://github.com/pybind/pybind11/pull/772>`_.
* Deprecated ``py::error_already_set::clear()``. It's no longer needed
following a simplification of the ``py::error_already_set`` class.
`#954 <https://github.com/pybind/pybind11/pull/954>`_.
* Deprecated ``py::handle::operator==()`` in favor of ``py::handle::is()``
`#825 <https://github.com/pybind/pybind11/pull/825>`_.
* Deprecated ``py::object::borrowed``/``py::object::stolen``.
Use ``py::object::borrowed_t{}``/``py::object::stolen_t{}`` instead.
`#771 <https://github.com/pybind/pybind11/pull/771>`_.
* Changed internal data structure versioning to avoid conflicts between
modules compiled with different revisions of pybind11.
`#1012 <https://github.com/pybind/pybind11/pull/1012>`_.
* Additional compile-time and run-time error checking and more informative messages.
`#786 <https://github.com/pybind/pybind11/pull/786>`_,
`#794 <https://github.com/pybind/pybind11/pull/794>`_,
`#803 <https://github.com/pybind/pybind11/pull/803>`_.
* Various minor improvements and fixes.
`#764 <https://github.com/pybind/pybind11/pull/764>`_,
`#791 <https://github.com/pybind/pybind11/pull/791>`_,
`#795 <https://github.com/pybind/pybind11/pull/795>`_,
`#840 <https://github.com/pybind/pybind11/pull/840>`_,
`#844 <https://github.com/pybind/pybind11/pull/844>`_,
`#846 <https://github.com/pybind/pybind11/pull/846>`_,
`#849 <https://github.com/pybind/pybind11/pull/849>`_,
`#858 <https://github.com/pybind/pybind11/pull/858>`_,
`#862 <https://github.com/pybind/pybind11/pull/862>`_,
`#871 <https://github.com/pybind/pybind11/pull/871>`_,
`#872 <https://github.com/pybind/pybind11/pull/872>`_,
`#881 <https://github.com/pybind/pybind11/pull/881>`_,
`#888 <https://github.com/pybind/pybind11/pull/888>`_,
`#899 <https://github.com/pybind/pybind11/pull/899>`_,
`#928 <https://github.com/pybind/pybind11/pull/928>`_,
`#931 <https://github.com/pybind/pybind11/pull/931>`_,
`#944 <https://github.com/pybind/pybind11/pull/944>`_,
`#950 <https://github.com/pybind/pybind11/pull/950>`_,
`#952 <https://github.com/pybind/pybind11/pull/952>`_,
`#962 <https://github.com/pybind/pybind11/pull/962>`_,
`#965 <https://github.com/pybind/pybind11/pull/965>`_,
`#970 <https://github.com/pybind/pybind11/pull/970>`_,
`#978 <https://github.com/pybind/pybind11/pull/978>`_,
`#979 <https://github.com/pybind/pybind11/pull/979>`_,
`#986 <https://github.com/pybind/pybind11/pull/986>`_,
`#1020 <https://github.com/pybind/pybind11/pull/1020>`_,
`#1027 <https://github.com/pybind/pybind11/pull/1027>`_,
`#1037 <https://github.com/pybind/pybind11/pull/1037>`_.
* Testing improvements.
`#798 <https://github.com/pybind/pybind11/pull/798>`_,
`#882 <https://github.com/pybind/pybind11/pull/882>`_,
`#898 <https://github.com/pybind/pybind11/pull/898>`_,
`#900 <https://github.com/pybind/pybind11/pull/900>`_,
`#921 <https://github.com/pybind/pybind11/pull/921>`_,
`#923 <https://github.com/pybind/pybind11/pull/923>`_,
`#963 <https://github.com/pybind/pybind11/pull/963>`_.
v2.1.1 (April 7, 2017)
-----------------------------------------------------
* Fixed minimum version requirement for MSVC 2015u3
`#773 <https://github.com/pybind/pybind11/pull/773>`_.
v2.1.0 (March 22, 2017)
-----------------------------------------------------
* pybind11 now performs function overload resolution in two phases. The first
phase only considers exact type matches, while the second allows for implicit
conversions to take place. A special ``noconvert()`` syntax can be used to
completely disable implicit conversions for specific arguments.
`#643 <https://github.com/pybind/pybind11/pull/643>`_,
`#634 <https://github.com/pybind/pybind11/pull/634>`_,
`#650 <https://github.com/pybind/pybind11/pull/650>`_.
* Fixed a regression where static properties no longer worked with classes
using multiple inheritance. The ``py::metaclass`` attribute is no longer
necessary (and deprecated as of this release) when binding classes with
static properties.
`#679 <https://github.com/pybind/pybind11/pull/679>`_,
* Classes bound using ``pybind11`` can now use custom metaclasses.
`#679 <https://github.com/pybind/pybind11/pull/679>`_,
* ``py::args`` and ``py::kwargs`` can now be mixed with other positional
arguments when binding functions using pybind11.
`#611 <https://github.com/pybind/pybind11/pull/611>`_.
* Improved support for C++11 unicode string and character types; added
extensive documentation regarding pybind11's string conversion behavior.
`#624 <https://github.com/pybind/pybind11/pull/624>`_,
`#636 <https://github.com/pybind/pybind11/pull/636>`_,
`#715 <https://github.com/pybind/pybind11/pull/715>`_.
* pybind11 can now avoid expensive copies when converting Eigen arrays to NumPy
arrays (and vice versa). `#610 <https://github.com/pybind/pybind11/pull/610>`_.
* The "fast path" in ``py::vectorize`` now works for any full-size group of C or
F-contiguous arrays. The non-fast path is also faster since it no longer performs
copies of the input arguments (except when type conversions are necessary).
`#610 <https://github.com/pybind/pybind11/pull/610>`_.
* Added fast, unchecked access to NumPy arrays via a proxy object.
`#746 <https://github.com/pybind/pybind11/pull/746>`_.
* Transparent support for class-specific ``operator new`` and
``operator delete`` implementations.
`#755 <https://github.com/pybind/pybind11/pull/755>`_.
* Slimmer and more efficient STL-compatible iterator interface for sequence types.
`#662 <https://github.com/pybind/pybind11/pull/662>`_.
* Improved custom holder type support.
`#607 <https://github.com/pybind/pybind11/pull/607>`_.
* ``nullptr`` to ``None`` conversion fixed in various builtin type casters.
`#732 <https://github.com/pybind/pybind11/pull/732>`_.
* ``enum_`` now exposes its members via a special ``__members__`` attribute.
`#666 <https://github.com/pybind/pybind11/pull/666>`_.
* ``std::vector`` bindings created using ``stl_bind.h`` can now optionally
implement the buffer protocol. `#488 <https://github.com/pybind/pybind11/pull/488>`_.
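  A short sketch of opting in (assumes ``#include <pybind11/stl_bind.h>``):
  .. code-block:: cpp
      // The extra py::buffer_protocol() annotation makes the bound vector
      // usable wherever a Python buffer is expected (e.g. numpy.asarray).
      py::bind_vector<std::vector<float>>(m, "VectorFloat", py::buffer_protocol());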
* Automated C++ reference documentation using doxygen and breathe.
`#598 <https://github.com/pybind/pybind11/pull/598>`_.
* Added minimum compiler version assertions.
`#727 <https://github.com/pybind/pybind11/pull/727>`_.
* Improved compatibility with C++1z.
`#677 <https://github.com/pybind/pybind11/pull/677>`_.
* Improved ``py::capsule`` API. Can be used to implement cleanup
callbacks that are invoked at module destruction time.
`#752 <https://github.com/pybind/pybind11/pull/752>`_.
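  A minimal sketch of the cleanup-callback idiom (the attribute name
  ``_cleanup`` is arbitrary):
  .. code-block:: cpp
      // The capsule's callback runs when the module is torn down.
      auto cleanup = []() { /* release global resources here */ };
      m.add_object("_cleanup", py::capsule(cleanup));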
* Various minor improvements and fixes.
`#595 <https://github.com/pybind/pybind11/pull/595>`_,
`#588 <https://github.com/pybind/pybind11/pull/588>`_,
`#589 <https://github.com/pybind/pybind11/pull/589>`_,
`#603 <https://github.com/pybind/pybind11/pull/603>`_,
`#619 <https://github.com/pybind/pybind11/pull/619>`_,
`#648 <https://github.com/pybind/pybind11/pull/648>`_,
`#695 <https://github.com/pybind/pybind11/pull/695>`_,
`#720 <https://github.com/pybind/pybind11/pull/720>`_,
`#723 <https://github.com/pybind/pybind11/pull/723>`_,
`#729 <https://github.com/pybind/pybind11/pull/729>`_,
`#724 <https://github.com/pybind/pybind11/pull/724>`_,
`#742 <https://github.com/pybind/pybind11/pull/742>`_,
`#753 <https://github.com/pybind/pybind11/pull/753>`_.
v2.0.1 (Jan 4, 2017)
-----------------------------------------------------
* Fix pointer to reference error in type_caster on MSVC
`#583 <https://github.com/pybind/pybind11/pull/583>`_.
* Fixed a segmentation fault in the test suite due to a typo
`cd7eac <https://github.com/pybind/pybind11/commit/cd7eac>`_.
v2.0.0 (Jan 1, 2017)
-----------------------------------------------------
* Fixed a reference counting regression affecting types with custom metaclasses
(introduced in v2.0.0-rc1).
`#571 <https://github.com/pybind/pybind11/pull/571>`_.
* Quenched a CMake policy warning.
`#570 <https://github.com/pybind/pybind11/pull/570>`_.
v2.0.0-rc1 (Dec 23, 2016)
-----------------------------------------------------
The pybind11 developers are excited to issue a release candidate of pybind11
with a subsequent v2.0.0 release planned in early January next year.
An incredible amount of effort went into pybind11 over the last ~5 months,
leading to a release that is jam-packed with exciting new features and numerous
usability improvements. The following list links PRs or individual commits
whenever applicable.
Happy Christmas!
* Support for binding C++ class hierarchies that make use of multiple
inheritance. `#410 <https://github.com/pybind/pybind11/pull/410>`_.
* PyPy support: pybind11 now supports nightly builds of PyPy and will
interoperate with the future 5.7 release. No code changes are necessary,
everything "just" works as usual. Note that we only target the Python 2.7
branch for now; support for 3.x will be added once its ``cpyext`` extension
support catches up. A few minor features remain unsupported for the time
being (notably dynamic attributes in custom types).
`#527 <https://github.com/pybind/pybind11/pull/527>`_.
* Significant work on the documentation -- in particular, the monolithic
``advanced.rst`` file was restructured into an easier-to-read hierarchical
organization. `#448 <https://github.com/pybind/pybind11/pull/448>`_.
* Many NumPy-related improvements:
1. Object-oriented API to access and modify NumPy ``ndarray`` instances,
replicating much of the corresponding NumPy C API functionality.
`#402 <https://github.com/pybind/pybind11/pull/402>`_.
2. NumPy array ``dtype`` array descriptors are now first-class citizens and
are exposed via a new class ``py::dtype``.
3. Structured dtypes can be registered using the ``PYBIND11_NUMPY_DTYPE()``
macro. Special ``array`` constructors accepting dtype objects were also
added.
One potential caveat involving this change: format descriptor strings
should now be accessed via ``format_descriptor::format()`` (however, for
compatibility purposes, the old syntax ``format_descriptor::value`` will
still work for non-structured data types). `#308
<https://github.com/pybind/pybind11/pull/308>`_.
4. Further improvements to support structured dtypes throughout the system.
`#472 <https://github.com/pybind/pybind11/pull/472>`_,
`#474 <https://github.com/pybind/pybind11/pull/474>`_,
`#459 <https://github.com/pybind/pybind11/pull/459>`_,
`#453 <https://github.com/pybind/pybind11/pull/453>`_,
`#452 <https://github.com/pybind/pybind11/pull/452>`_, and
`#505 <https://github.com/pybind/pybind11/pull/505>`_.
5. Fast access operators. `#497 <https://github.com/pybind/pybind11/pull/497>`_.
6. Constructors for arrays whose storage is owned by another object.
`#440 <https://github.com/pybind/pybind11/pull/440>`_.
7. Added constructors for ``array`` and ``array_t`` explicitly accepting shape
and strides; if strides are not provided, they are deduced assuming
C-contiguity. Also added simplified constructors for the 1-dimensional case.
8. Added buffer/NumPy support for ``char[N]`` and ``std::array<char, N>`` types.
9. Added ``memoryview`` wrapper type which is constructible from ``buffer_info``.
* Eigen: many additional conversions and support for non-contiguous
arrays/slices.
`#427 <https://github.com/pybind/pybind11/pull/427>`_,
`#315 <https://github.com/pybind/pybind11/pull/315>`_,
`#316 <https://github.com/pybind/pybind11/pull/316>`_,
`#312 <https://github.com/pybind/pybind11/pull/312>`_, and
`#267 <https://github.com/pybind/pybind11/pull/267>`_.
* Incompatible changes in ``class_<...>::class_()``:
1. Declarations of types that provide access via the buffer protocol must
now include the ``py::buffer_protocol()`` annotation as an argument to
the ``class_`` constructor.
2. Declarations of types that require a custom metaclass (i.e. all classes
which include static properties via commands such as
``def_readwrite_static()``) must now include the ``py::metaclass()``
annotation as an argument to the ``class_`` constructor.
These two changes were necessary to make type definitions in pybind11
future-proof, and to support PyPy via its cpyext mechanism. `#527
<https://github.com/pybind/pybind11/pull/527>`_.
3. This version of pybind11 uses a redesigned mechanism for instantiating
trampoline classes that are used to override virtual methods from within
Python. This led to the following user-visible syntax change: instead of
.. code-block:: cpp
py::class_<TrampolineClass>("MyClass")
.alias<MyClass>()
....
write
.. code-block:: cpp
py::class_<MyClass, TrampolineClass>("MyClass")
....
Importantly, both the original and the trampoline class are now
specified as arguments (in arbitrary order) to the ``py::class_``
template, and the ``alias<..>()`` call is gone. The new scheme has zero
overhead in cases when Python doesn't override any functions of the
underlying C++ class. `rev. 86d825
<https://github.com/pybind/pybind11/commit/86d825>`_.
* Added ``eval`` and ``eval_file`` functions for evaluating expressions and
statements from a string or file. `rev. 0d3fc3
<https://github.com/pybind/pybind11/commit/0d3fc3>`_.
* pybind11 can now create types with a modifiable dictionary.
`#437 <https://github.com/pybind/pybind11/pull/437>`_ and
`#444 <https://github.com/pybind/pybind11/pull/444>`_.
* Support for translation of arbitrary C++ exceptions to Python counterparts.
`#296 <https://github.com/pybind/pybind11/pull/296>`_ and
`#273 <https://github.com/pybind/pybind11/pull/273>`_.
* Report full backtraces through mixed C++/Python code, better reporting for
import errors, fixed GIL management in exception processing.
`#537 <https://github.com/pybind/pybind11/pull/537>`_,
`#494 <https://github.com/pybind/pybind11/pull/494>`_,
`rev. e72d95 <https://github.com/pybind/pybind11/commit/e72d95>`_, and
`rev. 099d6e <https://github.com/pybind/pybind11/commit/099d6e>`_.
* Support for bit-level operations, comparisons, and serialization of C++
enumerations. `#503 <https://github.com/pybind/pybind11/pull/503>`_,
`#508 <https://github.com/pybind/pybind11/pull/508>`_,
`#380 <https://github.com/pybind/pybind11/pull/380>`_,
`#309 <https://github.com/pybind/pybind11/pull/309>`_, and
`#311 <https://github.com/pybind/pybind11/pull/311>`_.
* The ``class_`` constructor now accepts its template arguments in any order.
`#385 <https://github.com/pybind/pybind11/pull/385>`_.
* Attribute and item accessors now have a more complete interface which makes
it possible to chain attributes as in
``obj.attr("a")[key].attr("b").attr("method")(1, 2, 3)``. `#425
<https://github.com/pybind/pybind11/pull/425>`_.
* Major redesign of the default and conversion constructors in ``pytypes.h``.
`#464 <https://github.com/pybind/pybind11/pull/464>`_.
* Added built-in support for ``std::shared_ptr`` holder type. It is no longer
necessary to include a declaration of the form
``PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>)`` (though continuing to
do so won't cause an error).
`#454 <https://github.com/pybind/pybind11/pull/454>`_.
* New ``py::overload_cast`` casting operator to select among multiple possible
overloads of a function. An example:
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def("set", py::overload_cast<int>(&Pet::set), "Set the pet's age")
.def("set", py::overload_cast<const std::string &>(&Pet::set), "Set the pet's name");
This feature only works on C++14-capable compilers.
`#541 <https://github.com/pybind/pybind11/pull/541>`_.
* C++ types are automatically cast to Python types, e.g. when assigning
them as an attribute. For instance, the following is now legal:
.. code-block:: cpp
py::module m = /* ... */
m.attr("constant") = 123;
(Previously, a ``py::cast`` call was necessary to avoid a compilation error.)
`#551 <https://github.com/pybind/pybind11/pull/551>`_.
* Redesigned ``pytest``-based test suite. `#321 <https://github.com/pybind/pybind11/pull/321>`_.
* Instance tracking to detect reference leaks in the test suite. `#324 <https://github.com/pybind/pybind11/pull/324>`_.
* pybind11 can now distinguish between multiple different instances that are
located at the same memory address, but which have different types.
`#329 <https://github.com/pybind/pybind11/pull/329>`_.
* Improved logic in ``move`` return value policy.
`#510 <https://github.com/pybind/pybind11/pull/510>`_,
`#297 <https://github.com/pybind/pybind11/pull/297>`_.
* Generalized unpacking API to permit calling Python functions from C++ using
notation such as ``foo(a1, a2, *args, "ka"_a=1, "kb"_a=2, **kwargs)``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.
* ``py::print()`` function whose behavior matches that of the native Python
``print()`` function. `#372 <https://github.com/pybind/pybind11/pull/372>`_.
* Added ``py::dict`` keyword constructor: ``auto d = dict("number"_a=42,
"name"_a="World");``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.
* Added ``py::str::format()`` method and ``_s`` literal: ``py::str s = "1 + 2
= {}"_s.format(3);``. `#372 <https://github.com/pybind/pybind11/pull/372>`_.
* Added ``py::repr()`` function which is equivalent to Python's builtin
``repr()``. `#333 <https://github.com/pybind/pybind11/pull/333>`_.
* Improved construction and destruction logic for holder types. It is now
possible to reference instances with smart pointer holder types without
constructing the holder if desired. The ``PYBIND11_DECLARE_HOLDER_TYPE``
macro now accepts an optional second parameter to indicate whether the holder
type uses intrusive reference counting.
`#533 <https://github.com/pybind/pybind11/pull/533>`_ and
`#561 <https://github.com/pybind/pybind11/pull/561>`_.
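  A sketch of the optional second parameter (``SmartPtr`` stands in for any
  intrusively reference-counted holder type):
  .. code-block:: cpp
      // 'true' indicates the holder can always be initialized from a raw
      // pointer, as is typical for intrusive reference counting.
      PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true);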
* Mapping a stateless C++ function to Python and back is now "for free" (i.e.
no extra indirections or argument conversion overheads). `rev. 954b79
<https://github.com/pybind/pybind11/commit/954b79>`_.
* Bindings for ``std::valarray<T>``.
`#545 <https://github.com/pybind/pybind11/pull/545>`_.
* Improved support for C++17 capable compilers.
`#562 <https://github.com/pybind/pybind11/pull/562>`_.
* Bindings for ``std::optional<T>``.
`#475 <https://github.com/pybind/pybind11/pull/475>`_,
`#476 <https://github.com/pybind/pybind11/pull/476>`_,
`#479 <https://github.com/pybind/pybind11/pull/479>`_,
`#499 <https://github.com/pybind/pybind11/pull/499>`_, and
`#501 <https://github.com/pybind/pybind11/pull/501>`_.
* ``stl_bind.h``: general improvements and support for ``std::map`` and
``std::unordered_map``.
`#490 <https://github.com/pybind/pybind11/pull/490>`_,
`#282 <https://github.com/pybind/pybind11/pull/282>`_,
`#235 <https://github.com/pybind/pybind11/pull/235>`_.
* The ``std::tuple``, ``std::pair``, ``std::list``, and ``std::vector`` type
casters now accept any Python sequence type as input. `rev. 107285
<https://github.com/pybind/pybind11/commit/107285>`_.
* Improved CMake Python detection on multi-architecture Linux.
`#532 <https://github.com/pybind/pybind11/pull/532>`_.
* Infrastructure to selectively disable or enable parts of the automatically
generated docstrings. `#486 <https://github.com/pybind/pybind11/pull/486>`_.
* ``reference`` and ``reference_internal`` are now the default return value
  policies for static and non-static properties, respectively (the previous
  default was ``automatic``). `#473 <https://github.com/pybind/pybind11/pull/473>`_.
* Support for ``std::unique_ptr`` with non-default deleters or no deleter at
all (``py::nodelete``). `#384 <https://github.com/pybind/pybind11/pull/384>`_.
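  A minimal sketch of the no-deleter form (``MyClass`` is a placeholder type):
  .. code-block:: cpp
      // Python never deletes the instance; ownership stays on the C++ side.
      py::class_<MyClass, std::unique_ptr<MyClass, py::nodelete>>(m, "MyClass")
          .def(py::init<>());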
* Deprecated ``handle::call()`` method. The new syntax to call Python
functions is simply ``handle()``. It can also be invoked explicitly via
``handle::operator<X>()``, where ``X`` is an optional return value policy.
* Print more informative error messages when ``make_tuple()`` or ``cast()``
fail. `#262 <https://github.com/pybind/pybind11/pull/262>`_.
* Creation of holder types for classes deriving from
``std::enable_shared_from_this<>`` now also works for ``const`` values.
`#260 <https://github.com/pybind/pybind11/pull/260>`_.
* ``make_iterator()`` improvements for better compatibility with various
types (now uses prefix increment operator); it now also accepts iterators
with different begin/end types as long as they are equality comparable.
`#247 <https://github.com/pybind/pybind11/pull/247>`_.
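  A sketch of the usual pattern (``Sequence`` is a stand-in for any class with
  equality-comparable begin/end iterators):
  .. code-block:: cpp
      py::class_<Sequence>(m, "Sequence")
          .def("__iter__", [](const Sequence &s) {
              return py::make_iterator(s.begin(), s.end());
          }, py::keep_alive<0, 1>()); // keep the sequence alive while iterated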
* ``arg()`` now accepts a wider range of argument types for default values.
`#244 <https://github.com/pybind/pybind11/pull/244>`_.
* Support ``keep_alive`` where the nurse object may be ``None``. `#341
<https://github.com/pybind/pybind11/pull/341>`_.
* Added constructors for ``str`` and ``bytes`` from zero-terminated char
pointers, and from char pointers and length. Added constructors for ``str``
from ``bytes`` and for ``bytes`` from ``str``, which will perform UTF-8
decoding/encoding as required.
* Many other improvements of library internals without user-visible changes.
1.8.1 (July 12, 2016)
----------------------
* Fixed a rare but potentially very severe issue when the garbage collector ran
during pybind11 type creation.
1.8.0 (June 14, 2016)
----------------------
* Redesigned CMake build system which exports a convenient
``pybind11_add_module`` function to parent projects.
* ``std::vector<>`` type bindings analogous to Boost.Python's ``indexing_suite``
* Transparent conversion of sparse and dense Eigen matrices and vectors (``eigen.h``)
* Added an ``ExtraFlags`` template argument to the NumPy ``array_t<>`` wrapper
to disable an enforced cast that may lose precision, e.g. to create overloads
for different precisions and complex vs real-valued matrices.
* Prevent implicit conversion of floating point values to integral types in
function arguments
* Fixed incorrect default return value policy for functions returning a shared
pointer
* Don't allow registering a type via ``class_`` twice
* Don't allow casting a ``None`` value into a C++ lvalue reference
* Fixed a crash in ``enum_::operator==`` that was triggered by the ``help()`` command
* Improved detection of whether or not custom C++ types can be copy/move-constructed
* Extended ``str`` type to also work with ``bytes`` instances
* Added a ``"name"_a`` user defined string literal that is equivalent to ``py::arg("name")``.
* When specifying function arguments via ``py::arg``, the test that verifies
the number of arguments now runs at compile time.
* Added ``[[noreturn]]`` attribute to ``pybind11_fail()`` to quench some
compiler warnings
* List function arguments in exception text when the dispatch code cannot find
a matching overload
* Added ``PYBIND11_OVERLOAD_NAME`` and ``PYBIND11_OVERLOAD_PURE_NAME`` macros which
can be used to override virtual methods whose name differs in C++ and Python
(e.g. ``__call__`` and ``operator()``)
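  A sketch of the trampoline usage (``Callable`` and its interface are
  hypothetical):
  .. code-block:: cpp
      class PyCallable : public Callable {
      public:
          using Callable::Callable;
          int operator()(int x) override {
              // Dispatch to the Python method named "__call__" if overridden.
              PYBIND11_OVERLOAD_PURE_NAME(int, Callable, "__call__", operator(), x);
          }
      };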
* Various minor ``iterator`` and ``make_iterator()`` improvements
* Transparently support ``__bool__`` on Python 2.x and Python 3.x
* Fixed issue with destructor of unpickled object not being called
* Minor CMake build system improvements on Windows
* New ``pybind11::args`` and ``pybind11::kwargs`` types to create functions which
take an arbitrary number of arguments and keyword arguments
* New syntax to call a Python function from C++ using ``*args`` and ``*kwargs``
* The functions ``def_property_*`` now correctly process docstring arguments (these
formerly caused a segmentation fault)
* Many ``mkdoc.py`` improvements (enumerations, template arguments, ``DOC()``
macro accepts more arguments)
* Cygwin support
* Documentation improvements (pickling support, ``keep_alive``, macro usage)
1.7 (April 30, 2016)
----------------------
* Added a new ``move`` return value policy that triggers C++11 move semantics.
The automatic return value policy falls back to this case whenever an rvalue
reference is encountered
* Significantly more general GIL state routines that are used instead of
Python's troublesome ``PyGILState_Ensure`` and ``PyGILState_Release`` API
* Redesign of opaque types that drastically simplifies their usage
* Extended ability to pass values of type ``[const] void *``
* ``keep_alive`` fix: don't fail when there is no patient
* ``functional.h``: acquire the GIL before calling a Python function
* Added Python RAII type wrappers ``none`` and ``iterable``
* Added ``*args`` and ``*kwargs`` pass-through parameters to
``pybind11.get_include()`` function
* Iterator improvements and fixes
* Documentation on return value policies and opaque types improved
1.6 (April 30, 2016)
----------------------
* Skipped due to upload to PyPI gone wrong and inability to recover
(https://github.com/pypa/packaging-problems/issues/74)
1.5 (April 21, 2016)
----------------------
* For polymorphic types, use RTTI to try to return the closest type registered with pybind11
* Pickling support for serializing and unserializing C++ instances to a byte stream in Python
* Added a convenience routine ``make_iterator()`` which turns a range indicated
by a pair of C++ iterators into an iterable Python object
* Added ``len()`` and a variadic ``make_tuple()`` function
* Addressed a rare issue that could confuse the current virtual function
dispatcher and another that could lead to crashes in multi-threaded
applications
* Added a ``get_include()`` function to the Python module that returns the path
of the directory containing the installed pybind11 header files
* Documentation improvements: import issues, symbol visibility, pickling, limitations
* Added casting support for ``std::reference_wrapper<>``
1.4 (April 7, 2016)
--------------------------
* Transparent type conversion for ``std::wstring`` and ``wchar_t``
* Allow passing ``nullptr``-valued strings
* Transparent passing of ``void *`` pointers using capsules
* Transparent support for returning values wrapped in ``std::unique_ptr<>``
* Improved docstring generation for compatibility with Sphinx
* Nicer debug error message when default parameter construction fails
* Support for "opaque" types that bypass the transparent conversion layer for STL containers
* Redesigned type casting interface to avoid ambiguities that could occasionally cause compiler errors
* Redesigned property implementation; fixes crashes due to an unfortunate default return value policy
* Anaconda package generation support
1.3 (March 8, 2016)
--------------------------
* Added support for the Intel C++ compiler (v15+)
* Added support for the STL unordered set/map data structures
* Added support for the STL linked list data structure
* NumPy-style broadcasting support in ``pybind11::vectorize``
* pybind11 now displays more verbose error messages when ``arg::operator=()`` fails
* pybind11 internal data structures now live in a version-dependent namespace to avoid ABI issues
* Many, many bugfixes involving corner cases and advanced usage
1.2 (February 7, 2016)
--------------------------
* Optional: efficient generation of function signatures at compile time using C++14
* Switched to a simpler and more general way of dealing with function default
arguments. Unused keyword arguments in function calls are now detected and
cause errors as expected
* New ``keep_alive`` call policy analogous to Boost.Python's ``with_custodian_and_ward``
* New ``pybind11::base<>`` attribute to indicate a subclass relationship
* Improved interface for RAII type wrappers in ``pytypes.h``
* Use RAII type wrappers consistently within pybind11 itself. This
fixes various potential refcount leaks when exceptions occur
* Added new ``bytes`` RAII type wrapper (maps to ``string`` in Python 2.7)
* Made handle and related RAII classes const correct, using them more
consistently everywhere now
* Got rid of the ugly ``__pybind11__`` attributes on the Python side---they are
now stored in a C++ hash table that is not visible in Python
* Fixed refcount leaks involving NumPy arrays and bound functions
* Vastly improved handling of shared/smart pointers
* Removed an unnecessary copy operation in ``pybind11::vectorize``
* Fixed naming clashes when both pybind11 and NumPy headers are included
* Added conversions for additional exception types
* Documentation improvements (using multiple extension modules, smart pointers,
other minor clarifications)
* Unified infrastructure for parsing variadic arguments in ``class_`` and ``cpp_function``
* Fixed license text (was: ZLIB, should have been: 3-clause BSD)
* Python 3.2 compatibility
* Fixed remaining issues when accessing types in another plugin module
* Added enum comparison and casting methods
* Improved SFINAE-based detection of whether types are copy-constructible
* Eliminated many warnings about unused variables and the use of ``offsetof()``
* Support for ``std::array<>`` conversions
1.1 (December 7, 2015)
--------------------------
* Documentation improvements (GIL, wrapping functions, casting, fixed many typos)
* Generalized conversion of integer types
* Improved support for casting function objects
* Improved support for ``std::shared_ptr<>`` conversions
* Initial support for ``std::set<>`` conversions
* Fixed type resolution issue for types defined in a separate plugin module
* CMake build system improvements
* Factored out generic functionality to non-templated code (smaller code size)
* Added a code size / compile time benchmark vs Boost.Python
* Added an AppVeyor CI script
1.0 (October 15, 2015)
------------------------
* Initial release
.. _compiling:
Build systems
#############
Building with setuptools
========================
For projects on PyPI, building with setuptools is the way to go. Sylvain Corlay
has kindly provided an example project which shows how to set up everything,
including automatic generation of documentation using Sphinx. Please refer to
the [python_example]_ repository.
.. [python_example] https://github.com/pybind/python_example
Building with cppimport
========================
[cppimport]_ is a small Python import hook that determines whether there is a C++
source file whose name matches the requested module. If there is, the file is
compiled as a Python extension using pybind11 and placed in the same folder as
the C++ source file. Python is then able to find the module and load it.
.. [cppimport] https://github.com/tbenthompson/cppimport
.. _cmake:
Building with CMake
===================
For C++ codebases that have an existing CMake-based build system, a Python
extension module can be created with just a few lines of code:
.. code-block:: cmake
cmake_minimum_required(VERSION 2.8.12)
project(example)
add_subdirectory(pybind11)
pybind11_add_module(example example.cpp)
This assumes that the pybind11 repository is located in a subdirectory named
:file:`pybind11` and that the code is located in a file named :file:`example.cpp`.
The CMake command ``add_subdirectory`` will import the pybind11 project which
provides the ``pybind11_add_module`` function. It will take care of all the
details needed to build a Python extension module on any platform.
A working sample project, including a way to invoke CMake from :file:`setup.py` for
PyPI integration, can be found in the [cmake_example]_ repository.
.. [cmake_example] https://github.com/pybind/cmake_example
pybind11_add_module
-------------------
To ease the creation of Python extension modules, pybind11 provides a CMake
function with the following signature:
.. code-block:: cmake
pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
[NO_EXTRAS] [SYSTEM] [THIN_LTO] source1 [source2 ...])
This function behaves very much like CMake's builtin ``add_library`` (in fact,
it's a wrapper function around that command). It will add a library target
called ``<name>`` to be built from the listed source files. In addition, it
will take care of all the Python-specific compiler and linker flags as well
as the OS- and Python-version-specific file extension. The produced target
``<name>`` can be further manipulated with regular CMake commands.
``MODULE`` or ``SHARED`` may be given to specify the type of library. If no
type is given, ``MODULE`` is used by default which ensures the creation of a
Python-exclusive module. Specifying ``SHARED`` will create a more traditional
dynamic library which can also be linked from elsewhere. ``EXCLUDE_FROM_ALL``
removes this target from the default build (see CMake docs for details).
Since pybind11 is a template library, ``pybind11_add_module`` adds compiler
flags to ensure high quality code generation without bloat arising from long
symbol names and duplication of code in different translation units. It
sets default visibility to *hidden*, which is required for some pybind11
features and functionality when attempting to load multiple pybind11 modules
compiled under different pybind11 versions. It also adds additional flags
enabling LTO (Link Time Optimization) and stripping unneeded symbols. See the
:ref:`FAQ entry <faq:symhidden>` for a more detailed explanation. These
latter optimizations are never applied in ``Debug`` mode. If ``NO_EXTRAS`` is
given, they will always be disabled, even in ``Release`` mode. However, this
will result in code bloat and is generally not recommended.
By default, pybind11 and Python headers will be included with ``-I``. In order
to include pybind11 as a system library, e.g. to avoid warnings in downstream
code with warning levels outside of pybind11's scope, set the option ``SYSTEM``.
As stated above, LTO is enabled by default. Some newer compilers also support
different flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause
the function to prefer this flavor if available. The function falls back to
regular LTO if ``-flto=thin`` is not available.
.. _ThinLTO: http://clang.llvm.org/docs/ThinLTO.html
Configuration variables
-----------------------
By default, pybind11 will compile modules with the C++14 standard, if available
on the target compiler, falling back to C++11 if C++14 support is not
available. Note, however, that this default is subject to change: future
pybind11 releases are expected to migrate to newer C++ standards as they become
available. To override this, the standard flag can be given explicitly in
``PYBIND11_CPP_STANDARD``:
.. code-block:: cmake
# Use just one of these:
# GCC/clang:
set(PYBIND11_CPP_STANDARD -std=c++11)
set(PYBIND11_CPP_STANDARD -std=c++14)
set(PYBIND11_CPP_STANDARD -std=c++1z) # Experimental C++17 support
# MSVC:
set(PYBIND11_CPP_STANDARD /std:c++14)
set(PYBIND11_CPP_STANDARD /std:c++latest) # Enables some MSVC C++17 features
add_subdirectory(pybind11) # or find_package(pybind11)
Note that this and all other configuration variables must be set **before** the
call to ``add_subdirectory`` or ``find_package``. The variables can also be set
when calling CMake from the command line using the ``-D<variable>=<value>`` flag.
The target Python version can be selected by setting ``PYBIND11_PYTHON_VERSION``
or an exact Python installation can be specified with ``PYTHON_EXECUTABLE``.
For example:
.. code-block:: bash
cmake -DPYBIND11_PYTHON_VERSION=3.6 ..
# or
cmake -DPYTHON_EXECUTABLE=path/to/python ..
find_package vs. add_subdirectory
---------------------------------
For CMake-based projects that don't include the pybind11 repository internally,
an external installation can be detected through ``find_package(pybind11)``.
See the `Config file`_ docstring for details of relevant CMake variables.
.. code-block:: cmake
cmake_minimum_required(VERSION 2.8.12)
project(example)
find_package(pybind11 REQUIRED)
pybind11_add_module(example example.cpp)
Note that ``find_package(pybind11)`` will only work correctly if pybind11
has been correctly installed on the system, e.g. after downloading or cloning
the pybind11 repository:
.. code-block:: bash
cd pybind11
mkdir build
cd build
cmake ..
make install
Once detected, the aforementioned ``pybind11_add_module`` can be employed as
before. The function usage and configuration variables are identical no matter
if pybind11 is added as a subdirectory or found as an installed package. You
can refer to the same [cmake_example]_ repository for a full sample project
-- just swap out ``add_subdirectory`` for ``find_package``.
.. _Config file: https://github.com/pybind/pybind11/blob/master/tools/pybind11Config.cmake.in
Advanced: interface library target
----------------------------------
When using a version of CMake greater than 3.0, pybind11 can additionally
be used as a special *interface library*. The target ``pybind11::module``
is available with pybind11 headers, Python headers and libraries as needed,
and C++ compile definitions attached. This target is suitable for linking
to an independently constructed (through ``add_library``, not
``pybind11_add_module``) target in the consuming project.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.0)
project(example)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
add_library(example MODULE main.cpp)
target_link_libraries(example PRIVATE pybind11::module)
set_target_properties(example PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
SUFFIX "${PYTHON_MODULE_EXTENSION}")
.. warning::
Since pybind11 is a metatemplate library, it is crucial that certain
compiler flags are provided to ensure high quality code generation. In
contrast to the ``pybind11_add_module()`` command, the CMake interface
library only provides the *minimal* set of parameters to ensure that the
code using pybind11 compiles, but it does **not** pass these extra compiler
flags (i.e. this is up to you).
These include Link Time Optimization (``-flto`` on GCC/Clang/ICPC, ``/GL``
and ``/LTCG`` on Visual Studio) and .OBJ files with many sections on Visual
Studio (``/bigobj``). The :ref:`FAQ <faq:symhidden>` contains an
explanation on why these are needed.
Embedding the Python interpreter
--------------------------------
In addition to extension modules, pybind11 also supports embedding Python into
a C++ executable or library. In CMake, simply link with the ``pybind11::embed``
target. It provides everything needed to get the interpreter running. The Python
headers and libraries are attached to the target. Unlike ``pybind11::module``,
there is no need to manually set any additional properties here. For more
information about usage in C++, see :doc:`/advanced/embedding`.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.0)
project(example)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
add_executable(example main.cpp)
target_link_libraries(example PRIVATE pybind11::embed)
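On the C++ side, a minimal embedded program might look like the sketch below
(using the documented ``py::scoped_interpreter`` RAII guard; see
:doc:`/advanced/embedding` for the full picture):
.. code-block:: cpp
    #include <pybind11/embed.h>
    namespace py = pybind11;
    int main() {
        py::scoped_interpreter guard{}; // start the interpreter, keep it alive
        py::print("Hello from an embedded Python interpreter");
    }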
.. _building_manually:
Building manually
=================
pybind11 is a header-only library, hence it is not necessary to link against
any special libraries and there are no intermediate (magic) translation steps.
On Linux, you can compile an example such as the one given in
:ref:`simple_example` using the following command:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
The flags given here assume that you're using Python 3. For Python 2, just
change the executable appropriately (to ``python`` or ``python2``).
The ``python3 -m pybind11 --includes`` command fetches the include paths for
both pybind11 and Python headers. This assumes that pybind11 has been installed
using ``pip`` or ``conda``. If it hasn't, you can also manually specify
``-I <path-to-pybind11>/include`` together with the Python includes path
``python3-config --includes``.
Note that Python 2.7 modules don't use a special suffix, so you should simply
use ``example.so`` instead of ``example`python3-config --extension-suffix```.
Also, the ``--extension-suffix`` option may not be available on every
distribution; if it is missing, the module extension can simply be set to
``.so`` manually.
On macOS, the build command is almost the same, but it also requires passing
the ``-undefined dynamic_lookup`` flag to ignore missing symbols when
building the module:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
In general, it is advisable to include several additional build parameters
that can considerably reduce the size of the created binary. Refer to section
:ref:`cmake` for a detailed example of a suitable cross-platform CMake-based
build system that works on all platforms including Windows.
.. note::
On Linux and macOS, it's better to (intentionally) not link against
``libpython``. The symbols will be resolved when the extension library
is loaded into a Python binary. This is preferable because you might
have several different installations of a given Python version (e.g. the
system-provided Python, and one that ships with a piece of commercial
software). In this way, the plugin will work with both versions, instead
of possibly importing a second Python library into a process that already
contains one (which will lead to a segfault).
Generating binding code automatically
=====================================
The ``Binder`` project is a tool for automatic generation of pybind11 binding
code by introspecting existing C++ codebases using LLVM/Clang. See the
[binder]_ documentation for details.
.. [binder] http://cppbinder.readthedocs.io/en/latest/about.html
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pybind11 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:23:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
breathe_projects = {'pybind11': '.build/doxygenxml/'}
breathe_default_project = 'pybind11'
breathe_domain_by_extension = {'h': 'cpp'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybind11'
copyright = '2017, Wenzel Jakob'
author = 'Wenzel Jakob'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5'
# The full version, including alpha/beta/rc tags.
release = '2.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', 'release.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybind11doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'\DeclareUnicodeCharacter{00A0}{}',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybind11.tex', 'pybind11 Documentation',
'Wenzel Jakob', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybind11', 'pybind11 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybind11', 'pybind11 Documentation',
author, 'pybind11', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'cpp'
highlight_language = 'cpp'
def generate_doxygen_xml(app):
build_dir = os.path.join(app.confdir, '.build')
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
subprocess.call(['doxygen', '--version'])
retcode = subprocess.call(['doxygen'], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
def setup(app):
"""Add hook for building doxygen xml when needed"""
app.connect("builder-inited", generate_doxygen_xml)
Benchmark
=========
The following is the result of a synthetic benchmark comparing both compilation
time and module size of pybind11 against Boost.Python. A detailed report about a
Boost.Python to pybind11 conversion of a real project is available here: [#f1]_.
.. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
Setup
-----
A python script (see the ``docs/benchmark.py`` file) was used to generate a set
of files with dummy classes whose count increases for each successive benchmark
(between 1 and 2048 classes in powers of two). Each class has four methods with
a randomly generated signature with a return value and four arguments. (There
was no particular reason for this setup other than the desire to generate many
unique function signatures whose count could be controlled in a simple way.)
Here is an example of the binding code for one class:
.. code-block:: cpp
...
class cl034 {
public:
cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *);
cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *);
cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *);
cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *);
};
...
PYBIND11_MODULE(example, m) {
...
py::class_<cl034>(m, "cl034")
.def("fn_000", &cl034::fn_000)
.def("fn_001", &cl034::fn_001)
.def("fn_002", &cl034::fn_002)
.def("fn_003", &cl034::fn_003)
...
}
The Boost.Python version looks almost identical except that a return value
policy had to be specified as an argument to ``def()``. For both libraries,
compilation was done with
.. code-block:: bash
Apple LLVM version 7.0.2 (clang-700.1.81)
and the following compilation flags
.. code-block:: bash
g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14
Compilation time
----------------
The following log-log plot shows how the compilation time grows for an
increasing number of class and function declarations. pybind11 includes many
fewer headers, which initially leads to shorter compilation times, but the
performance is ultimately fairly similar (pybind11 is 19.8 seconds faster for
the largest file with 2048 classes and a total of 8192 methods -- a
modest **1.2x** speedup relative to Boost.Python, which required 116.35
seconds).
.. only:: not latex
.. image:: pybind11_vs_boost_python1.svg
.. only:: latex
.. image:: pybind11_vs_boost_python1.png
Module size
-----------
Differences between the two libraries become much more pronounced when
considering the file size of the generated Python plugin: for the largest file,
the binary generated by Boost.Python required 16.8 MiB, which was **2.17
times** / **9.1 megabytes** larger than the output generated by pybind11. For
very small inputs, Boost.Python has an edge in the plot below -- however, note
that it stores many definitions in an external library, whose size was not
included here, hence the comparison is slightly shifted in Boost.Python's
favor.
.. only:: not latex
.. image:: pybind11_vs_boost_python2.svg
.. only:: latex
.. image:: pybind11_vs_boost_python2.png
.. _reference:
.. warning::
Please be advised that the reference documentation discussing pybind11
internals is currently incomplete. Please refer to the previous sections
and the pybind11 header files for the nitty-gritty details.
Reference
#########
.. _macros:
Macros
======
.. doxygendefine:: PYBIND11_MODULE
.. _core_types:
Convenience classes for arbitrary Python types
==============================================
Common member functions
-----------------------
.. doxygenclass:: object_api
:members:
Without reference counting
--------------------------
.. doxygenclass:: handle
:members:
With reference counting
-----------------------
.. doxygenclass:: object
:members:
.. doxygenfunction:: reinterpret_borrow
.. doxygenfunction:: reinterpret_steal
Convenience classes for specific Python types
=============================================
.. doxygenclass:: module
:members:
.. doxygengroup:: pytypes
:members:
.. _extras:
Passing extra arguments to ``def`` or ``class_``
================================================
.. doxygengroup:: annotations
:members:
Embedding the interpreter
=========================
.. doxygendefine:: PYBIND11_EMBEDDED_MODULE
.. doxygenfunction:: initialize_interpreter
.. doxygenfunction:: finalize_interpreter
.. doxygenclass:: scoped_interpreter
Redirecting C++ streams
=======================
.. doxygenclass:: scoped_ostream_redirect
.. doxygenclass:: scoped_estream_redirect
.. doxygenfunction:: add_ostream_redirect
Python built-in functions
=========================
.. doxygengroup:: python_builtins
:members:
Inheritance
===========
See :doc:`/classes` and :doc:`/advanced/classes` for more detail.
.. doxygendefine:: PYBIND11_OVERLOAD
.. doxygendefine:: PYBIND11_OVERLOAD_PURE
.. doxygendefine:: PYBIND11_OVERLOAD_NAME
.. doxygendefine:: PYBIND11_OVERLOAD_PURE_NAME
.. doxygenfunction:: get_overload
Exceptions
==========
.. doxygenclass:: error_already_set
:members:
.. doxygenclass:: builtin_exception
:members:
Literals
========
.. doxygennamespace:: literals
import random
import os
import time
import datetime as dt
nfns = 4 # Functions per class
nargs = 4 # Arguments per function
def generate_dummy_code_pybind11(nclasses=10):
decl = ""
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += '\n'
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += "public:\n"
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % \
(fn, cl, fn)
decl += "};\n\n"
bindings += ' ;\n'
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
result += decl + '\n'
result += "PYBIND11_MODULE(example, m) {\n"
result += bindings
result += "}"
return result
def generate_dummy_code_boost(nclasses=10):
decl = ""
bindings = ""
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += '\n'
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
decl += "public:\n"
bindings += ' py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n' % \
(fn, cl, fn)
decl += "};\n\n"
bindings += ' ;\n'
result = "#include <boost/python.hpp>\n\n"
result += "namespace py = boost::python;\n\n"
result += decl + '\n'
result += "BOOST_PYTHON_MODULE(example) {\n"
result += bindings
result += "}"
return result
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
print ("{")
for i in range(0, 10):
nclasses = 2 ** i
with open("test.cpp", "w") as f:
f.write(codegen(nclasses))
n1 = dt.datetime.now()
os.system("g++ -Os -shared -rdynamic -undefined dynamic_lookup "
"-fvisibility=hidden -std=c++14 test.cpp -I include "
"-I /System/Library/Frameworks/Python.framework/Headers -o test.so")
n2 = dt.datetime.now()
elapsed = (n2 - n1).total_seconds()
size = os.stat('test.so').st_size
print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
print ("}")
.. _basics:
First steps
###########
This section demonstrates the basic features of pybind11. Before getting
started, make sure that your development environment is set up to compile the
included set of test cases.
Compiling the test cases
========================
Linux/MacOS
-----------
On Linux you'll need to install the **python-dev** or **python3-dev** packages as
well as **cmake**. On macOS, the included Python version works out of the box,
but **cmake** must still be installed.
After installing the prerequisites, run
.. code-block:: bash
mkdir build
cd build
cmake ..
make check -j 4
The last line will both compile and run the tests.
Windows
-------
On Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies
on various C++11 language features that break older versions of Visual Studio.
To compile and run the tests:
.. code-block:: batch
mkdir build
cd build
cmake ..
cmake --build . --config Release --target check
This will create a Visual Studio project, compile and run the target, all from the
command line.
.. Note::
If all tests fail, make sure that the Python binary and the testcases are compiled
for the same processor type and bitness (i.e. either **i386** or **x86_64**). You
can specify **x86_64** as the target architecture for the generated Visual Studio
project using ``cmake -A x64 ..``.
.. seealso::
Advanced users who are already familiar with Boost.Python may want to skip
the tutorial and look at the test cases in the :file:`tests` directory,
which exercise all features of pybind11.
Header and namespace conventions
================================
For brevity, all code examples assume that the following two lines are present:
.. code-block:: cpp
#include <pybind11/pybind11.h>
namespace py = pybind11;
Some features may require additional headers, but those will be specified as needed.
.. _simple_example:
Creating bindings for a simple function
=======================================
Let's start by creating Python bindings for an extremely simple function, which
adds two numbers and returns their result:
.. code-block:: cpp
int add(int i, int j) {
return i + j;
}
For simplicity [#f1]_, we'll put both this function and the binding code into
a file named :file:`example.cpp` with the following contents:
.. code-block:: cpp
#include <pybind11/pybind11.h>
int add(int i, int j) {
return i + j;
}
PYBIND11_MODULE(example, m) {
m.doc() = "pybind11 example plugin"; // optional module docstring
m.def("add", &add, "A function which adds two numbers");
}
.. [#f1] In practice, implementation and binding code will generally be located
in separate files.
The :func:`PYBIND11_MODULE` macro creates a function that will be called when an
``import`` statement is issued from within Python. The module name (``example``)
is given as the first macro argument (it should not be in quotes). The second
argument (``m``) defines a variable of type :class:`py::module <module>` which
is the main interface for creating bindings. The method :func:`module::def`
generates binding code that exposes the ``add()`` function to Python.
.. note::
Notice how little code was needed to expose our function to Python: all
details regarding the function's parameters and return value were
automatically inferred using template metaprogramming. This overall
approach and the used syntax are borrowed from Boost.Python, though the
underlying implementation is very different.
pybind11 is a header-only library, hence it is not necessary to link against
any special libraries and there are no intermediate (magic) translation steps.
On Linux, the above example can be compiled using the following command:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
For more details on the required compiler flags on Linux and MacOS, see
:ref:`building_manually`. For complete cross-platform compilation instructions,
refer to the :ref:`compiling` page.
The `python_example`_ and `cmake_example`_ repositories are also a good place
to start. They are both complete project examples with cross-platform build
systems. The only difference between the two is that `python_example`_ uses
Python's ``setuptools`` to build the module, while `cmake_example`_ uses CMake
(which may be preferable for existing C++ projects).
.. _python_example: https://github.com/pybind/python_example
.. _cmake_example: https://github.com/pybind/cmake_example
Building the above C++ code will produce a binary module file that can be
imported to Python. Assuming that the compiled module is located in the
current directory, the following interactive Python session shows how to
load and execute the example:
.. code-block:: pycon
$ python
Python 2.7.10 (default, Aug 22 2015, 20:33:39)
[GCC 4.2.1 Compatible Apple LLVM 7.0.0 (clang-700.0.59.1)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import example
>>> example.add(1, 2)
3L
>>>
.. _keyword_args:
Keyword arguments
=================
With a simple code modification, it is possible to inform Python about the
names of the arguments ("i" and "j" in this case).
.. code-block:: cpp
m.def("add", &add, "A function which adds two numbers",
py::arg("i"), py::arg("j"));
:class:`arg` is one of several special tag classes which can be used to pass
metadata into :func:`module::def`. With this modified binding code, we can now
call the function using keyword arguments, which is a more readable alternative
particularly for functions taking many parameters:
.. code-block:: pycon
>>> import example
>>> example.add(i=1, j=2)
3L
The keyword names also appear in the function signatures within the documentation.
.. code-block:: pycon
>>> help(example)
....
FUNCTIONS
add(...)
Signature : (i: int, j: int) -> int
A function which adds two numbers
A shorter notation for named arguments is also available:
.. code-block:: cpp
// regular notation
m.def("add1", &add, py::arg("i"), py::arg("j"));
// shorthand
using namespace pybind11::literals;
m.def("add2", &add, "i"_a, "j"_a);
The :var:`_a` suffix forms a C++11 literal which is equivalent to :class:`arg`.
Note that the literal operator must first be made visible with the directive
``using namespace pybind11::literals``. This does not bring in anything else
from the ``pybind11`` namespace except for literals.
.. _default_args:
Default arguments
=================
Suppose now that the function to be bound has default arguments, e.g.:
.. code-block:: cpp
int add(int i = 1, int j = 2) {
return i + j;
}
Unfortunately, pybind11 cannot automatically extract these parameters, since they
are not part of the function's type information. However, they are simple to specify
using an extension of :class:`arg`:
.. code-block:: cpp
m.def("add", &add, "A function which adds two numbers",
py::arg("i") = 1, py::arg("j") = 2);
The default values also appear within the documentation.
.. code-block:: pycon
>>> help(example)
....
FUNCTIONS
add(...)
Signature : (i: int = 1, j: int = 2) -> int
A function which adds two numbers
The shorthand notation is also available for default arguments:
.. code-block:: cpp
// regular notation
m.def("add1", &add, py::arg("i") = 1, py::arg("j") = 2);
// shorthand
m.def("add2", &add, "i"_a=1, "j"_a=2);
Exporting variables
===================
To expose a value from C++, use the ``attr`` function to register it in a
module as shown below. Built-in types and general objects (more on that later)
are automatically converted when assigned as attributes, and can be explicitly
converted using the function ``py::cast``.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
m.attr("the_answer") = 42;
py::object world = py::cast("World");
m.attr("what") = world;
}
These are then accessible from Python:
.. code-block:: pycon
>>> import example
>>> example.the_answer
42
>>> example.what
'World'
.. _supported_types:
Supported data types
====================
A large number of data types are supported out of the box and can be used
seamlessly as function arguments, return values, or with ``py::cast`` in general.
For a full overview, see the :doc:`advanced/cast/index` section.
.. _classes:
Object-oriented code
####################
Creating bindings for a custom type
===================================
Let's now look at a more complex example where we'll create bindings for a
custom C++ data structure named ``Pet``. Its definition is given below:
.. code-block:: cpp
struct Pet {
Pet(const std::string &name) : name(name) { }
void setName(const std::string &name_) { name = name_; }
const std::string &getName() const { return name; }
std::string name;
};
The binding code for ``Pet`` looks as follows:
.. code-block:: cpp
#include <pybind11/pybind11.h>
namespace py = pybind11;
PYBIND11_MODULE(example, m) {
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def("setName", &Pet::setName)
.def("getName", &Pet::getName);
}
:class:`class_` creates bindings for a C++ *class* or *struct*-style data
structure. :func:`init` is a convenience function that takes the types of a
constructor's parameters as template arguments and wraps the corresponding
constructor (see the :ref:`custom_constructors` section for details). An
interactive Python session demonstrating this example is shown below:
.. code-block:: pycon
% python
>>> import example
>>> p = example.Pet('Molly')
>>> print(p)
<example.Pet object at 0x10cd98060>
>>> p.getName()
u'Molly'
>>> p.setName('Charly')
>>> p.getName()
u'Charly'
.. seealso::
Static member functions can be bound in the same way using
:func:`class_::def_static`.
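As a minimal sketch of this (assuming ``Pet`` declared a hypothetical static
helper ``static Pet make_default();``), the binding would look like:
.. code-block:: cpp
    // `make_default` is a hypothetical static member, shown for illustration only
    py::class_<Pet>(m, "Pet")
        .def(py::init<const std::string &>())
        .def_static("make_default", &Pet::make_default);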
Keyword and default arguments
=============================
It is possible to specify keyword and default arguments using the syntax
discussed in the previous chapter. Refer to the sections :ref:`keyword_args`
and :ref:`default_args` for details.
Binding lambda functions
========================
Note how ``print(p)`` produced a rather useless summary of our data structure in the example above:
.. code-block:: pycon
>>> print(p)
<example.Pet object at 0x10cd98060>
To address this, we could bind a utility function that returns a human-readable
summary to the special method slot named ``__repr__``. Unfortunately, there is no
suitable functionality in the ``Pet`` data structure, and it would be nice if
we did not have to change it. This can easily be accomplished by binding a
lambda function instead:
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def("setName", &Pet::setName)
.def("getName", &Pet::getName)
.def("__repr__",
[](const Pet &a) {
return "<example.Pet named '" + a.name + "'>";
}
);
Both stateless [#f1]_ and stateful lambda closures are supported by pybind11.
With the above change, the same Python code now produces the following output:
.. code-block:: pycon
>>> print(p)
<example.Pet named 'Molly'>
.. [#f1] Stateless closures are those with an empty pair of brackets ``[]`` as the capture object.
.. _properties:
Instance and static fields
==========================
We can also directly expose the ``name`` field using the
:func:`class_::def_readwrite` method. A similar :func:`class_::def_readonly`
method also exists for ``const`` fields.
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def_readwrite("name", &Pet::name)
// ... remainder ...
This makes it possible to write
.. code-block:: pycon
>>> p = example.Pet('Molly')
>>> p.name
u'Molly'
>>> p.name = 'Charly'
>>> p.name
u'Charly'
Now suppose that ``Pet::name`` was a private internal variable
that can only be accessed via setters and getters.
.. code-block:: cpp
class Pet {
public:
Pet(const std::string &name) : name(name) { }
void setName(const std::string &name_) { name = name_; }
const std::string &getName() const { return name; }
private:
std::string name;
};
In this case, the method :func:`class_::def_property`
(:func:`class_::def_property_readonly` for read-only data) can be used to
provide a field-like interface within Python that will transparently call
the setter and getter functions:
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def_property("name", &Pet::getName, &Pet::setName)
// ... remainder ...
Write-only properties can be defined by passing ``nullptr`` as the
input for the read function.
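A minimal sketch of a write-only variant of the ``name`` property:
.. code-block:: cpp
    py::class_<Pet>(m, "Pet")
        .def(py::init<const std::string &>())
        .def_property("name", nullptr, &Pet::setName);  // write-only: no getter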
.. seealso::
Similar functions :func:`class_::def_readwrite_static`,
:func:`class_::def_readonly_static`, :func:`class_::def_property_static`,
and :func:`class_::def_property_readonly_static` are provided for binding
static variables and properties. Please also see the section on
:ref:`static_properties` in the advanced part of the documentation.
Dynamic attributes
==================
Native Python classes can pick up new attributes dynamically:
.. code-block:: pycon
>>> class Pet:
... name = 'Molly'
...
>>> p = Pet()
>>> p.name = 'Charly' # overwrite existing
>>> p.age = 2 # dynamically add a new attribute
By default, classes exported from C++ do not support this and the only writable
attributes are the ones explicitly defined using :func:`class_::def_readwrite`
or :func:`class_::def_property`.
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<>())
.def_readwrite("name", &Pet::name);
Trying to set any other attribute results in an error:
.. code-block:: pycon
>>> p = example.Pet()
>>> p.name = 'Charly' # OK, attribute defined in C++
>>> p.age = 2 # fail
AttributeError: 'Pet' object has no attribute 'age'
To enable dynamic attributes for C++ classes, the :class:`py::dynamic_attr` tag
must be added to the :class:`py::class_` constructor:
.. code-block:: cpp
py::class_<Pet>(m, "Pet", py::dynamic_attr())
.def(py::init<>())
.def_readwrite("name", &Pet::name);
Now everything works as expected:
.. code-block:: pycon
>>> p = example.Pet()
>>> p.name = 'Charly' # OK, overwrite value in C++
>>> p.age = 2 # OK, dynamically add a new attribute
>>> p.__dict__ # just like a native Python class
{'age': 2}
Note that there is a small runtime cost for a class with dynamic attributes.
Not only because of the addition of a ``__dict__``, but also because of more
expensive garbage collection tracking which must be activated to resolve
possible circular references. Native Python classes incur this same cost by
default, so this is not anything to worry about. By default, pybind11 classes
are more efficient than native Python classes. Enabling dynamic attributes
just brings them on par.
.. _inheritance:
Inheritance and automatic downcasting
=====================================
Suppose now that the example consists of two data structures with an
inheritance relationship:
.. code-block:: cpp
struct Pet {
Pet(const std::string &name) : name(name) { }
std::string name;
};
struct Dog : Pet {
Dog(const std::string &name) : Pet(name) { }
std::string bark() const { return "woof!"; }
};
There are two different ways of indicating a hierarchical relationship to
pybind11: the first specifies the C++ base class as an extra template
parameter of the :class:`class_`:
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &>())
.def_readwrite("name", &Pet::name);
// Method 1: template parameter:
py::class_<Dog, Pet /* <- specify C++ parent type */>(m, "Dog")
.def(py::init<const std::string &>())
.def("bark", &Dog::bark);
Alternatively, we can also assign a name to the previously bound ``Pet``
:class:`class_` object and reference it when binding the ``Dog`` class:
.. code-block:: cpp
py::class_<Pet> pet(m, "Pet");
pet.def(py::init<const std::string &>())
.def_readwrite("name", &Pet::name);
// Method 2: pass parent class_ object:
py::class_<Dog>(m, "Dog", pet /* <- specify Python parent type */)
.def(py::init<const std::string &>())
.def("bark", &Dog::bark);
Functionality-wise, both approaches are equivalent. Afterwards, instances will
expose fields and methods of both types:
.. code-block:: pycon
>>> p = example.Dog('Molly')
>>> p.name
u'Molly'
>>> p.bark()
u'woof!'
The C++ classes defined above are regular non-polymorphic types with an
inheritance relationship. This is reflected in Python:
.. code-block:: cpp
// Return a base pointer to a derived instance
m.def("pet_store", []() { return std::unique_ptr<Pet>(new Dog("Molly")); });
.. code-block:: pycon
>>> p = example.pet_store()
>>> type(p) # `Dog` instance behind `Pet` pointer
Pet # no pointer downcasting for regular non-polymorphic types
>>> p.bark()
AttributeError: 'Pet' object has no attribute 'bark'
The function returned a ``Dog`` instance, but because it's a non-polymorphic
type behind a base pointer, Python only sees a ``Pet``. In C++, a type is only
considered polymorphic if it has at least one virtual function and pybind11
will automatically recognize this:
.. code-block:: cpp
struct PolymorphicPet {
virtual ~PolymorphicPet() = default;
};
struct PolymorphicDog : PolymorphicPet {
std::string bark() const { return "woof!"; }
};
// Same binding code
py::class_<PolymorphicPet>(m, "PolymorphicPet");
py::class_<PolymorphicDog, PolymorphicPet>(m, "PolymorphicDog")
.def(py::init<>())
.def("bark", &PolymorphicDog::bark);
// Again, return a base pointer to a derived instance
m.def("pet_store2", []() { return std::unique_ptr<PolymorphicPet>(new PolymorphicDog); });
.. code-block:: pycon
>>> p = example.pet_store2()
>>> type(p)
PolymorphicDog # automatically downcast
>>> p.bark()
u'woof!'
Given a pointer to a polymorphic base, pybind11 performs automatic downcasting
to the actual derived type. Note that this goes beyond the usual situation in
C++: we don't just get access to the virtual functions of the base, we get the
concrete derived type including functions and attributes that the base type may
not even be aware of.
.. seealso::
For more information about polymorphic behavior see :ref:`overriding_virtuals`.
Overloaded methods
==================
Sometimes there are several overloaded C++ methods with the same name taking
different kinds of input arguments:
.. code-block:: cpp
struct Pet {
Pet(const std::string &name, int age) : name(name), age(age) { }
void set(int age_) { age = age_; }
void set(const std::string &name_) { name = name_; }
std::string name;
int age;
};
Attempting to bind ``Pet::set`` will cause an error since the compiler does not
know which method the user intended to select. We can disambiguate by casting
them to function pointers. Binding multiple functions to the same Python name
automatically creates a chain of function overloads that will be tried in
sequence.
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &, int>())
.def("set", (void (Pet::*)(int)) &Pet::set, "Set the pet's age")
.def("set", (void (Pet::*)(const std::string &)) &Pet::set, "Set the pet's name");
The overload signatures are also visible in the method's docstring:
.. code-block:: pycon
>>> help(example.Pet)
class Pet(__builtin__.object)
| Methods defined here:
|
| __init__(...)
| Signature : (Pet, str, int) -> NoneType
|
| set(...)
| 1. Signature : (Pet, int) -> NoneType
|
| Set the pet's age
|
| 2. Signature : (Pet, str) -> NoneType
|
| Set the pet's name
If you have a C++14 compatible compiler [#cpp14]_, you can use an alternative
syntax to cast the overloaded function:
.. code-block:: cpp
py::class_<Pet>(m, "Pet")
.def("set", py::overload_cast<int>(&Pet::set), "Set the pet's age")
.def("set", py::overload_cast<const std::string &>(&Pet::set), "Set the pet's name");
Here, ``py::overload_cast`` only requires the parameter types to be specified.
The return type and class are deduced. This avoids the additional noise of
``void (Pet::*)()`` as seen in the raw cast. If a function is overloaded based
on constness, the ``py::const_`` tag should be used:
.. code-block:: cpp
struct Widget {
int foo(int x, float y);
int foo(int x, float y) const;
};
py::class_<Widget>(m, "Widget")
.def("foo_mutable", py::overload_cast<int, float>(&Widget::foo))
.def("foo_const", py::overload_cast<int, float>(&Widget::foo, py::const_));
If you prefer the ``py::overload_cast`` syntax but only have a C++11-compatible compiler,
you can use ``py::detail::overload_cast_impl`` with an additional set of parentheses:
.. code-block:: cpp
template <typename... Args>
using overload_cast_ = pybind11::detail::overload_cast_impl<Args...>;
py::class_<Pet>(m, "Pet")
.def("set", overload_cast_<int>()(&Pet::set), "Set the pet's age")
.def("set", overload_cast_<const std::string &>()(&Pet::set), "Set the pet's name");
.. [#cpp14] A compiler which supports the ``-std=c++14`` flag
or Visual Studio 2015 Update 2 and newer.
.. note::
To define multiple overloaded constructors, simply declare one after the
other using the ``.def(py::init<...>())`` syntax. The existing machinery
for specifying keyword and default arguments also works.
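For illustration, a sketch of two such constructor overloads (the single-argument
constructor is hypothetical and only shown to demonstrate the chaining):
.. code-block:: cpp
    py::class_<Pet>(m, "Pet")
        .def(py::init<const std::string &, int>(), py::arg("name"), py::arg("age"))
        .def(py::init<const std::string &>(), py::arg("name"));  // hypothetical overload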
Enumerations and internal types
===============================
Let's now suppose that the example class contains an internal enumeration type,
e.g.:
.. code-block:: cpp
struct Pet {
enum Kind {
Dog = 0,
Cat
};
Pet(const std::string &name, Kind type) : name(name), type(type) { }
std::string name;
Kind type;
};
The binding code for this example looks as follows:
.. code-block:: cpp
py::class_<Pet> pet(m, "Pet");
pet.def(py::init<const std::string &, Pet::Kind>())
.def_readwrite("name", &Pet::name)
.def_readwrite("type", &Pet::type);
py::enum_<Pet::Kind>(pet, "Kind")
.value("Dog", Pet::Kind::Dog)
.value("Cat", Pet::Kind::Cat)
.export_values();
To ensure that the ``Kind`` type is created within the scope of ``Pet``, the
``pet`` :class:`class_` instance must be supplied to the :class:`enum_`
constructor. The :func:`enum_::export_values` function exports the enum entries
into the parent scope, which should be skipped for newer C++11-style strongly
typed enums.
.. code-block:: pycon
>>> p = Pet('Lucy', Pet.Cat)
>>> p.type
Kind.Cat
>>> int(p.type)
1L
The entries defined by the enumeration type are exposed in the ``__members__`` property:
.. code-block:: pycon
>>> Pet.Kind.__members__
{'Dog': Kind.Dog, 'Cat': Kind.Cat}
The ``name`` property returns the name of the enum value as a unicode string.
.. note::
It is also possible to use ``str(enum)``; however, the two accomplish different
goals. The following shows how these two approaches differ.
.. code-block:: pycon
>>> p = Pet( "Lucy", Pet.Cat )
>>> pet_type = p.type
>>> pet_type
Pet.Cat
>>> str(pet_type)
'Pet.Cat'
>>> pet_type.name
'Cat'
.. note::
When the special tag ``py::arithmetic()`` is specified to the ``enum_``
constructor, pybind11 creates an enumeration that also supports rudimentary
arithmetic and bit-level operations like comparisons, and, or, xor, negation,
etc.
.. code-block:: cpp
py::enum_<Pet::Kind>(pet, "Kind", py::arithmetic())
...
By default, these are omitted to conserve space.
.. image:: pybind11-logo.png
About this project
==================
**pybind11** is a lightweight header-only library that exposes C++ types in Python
and vice versa, mainly to create Python bindings of existing C++ code. Its
goals and syntax are similar to the excellent `Boost.Python`_ library by David
Abrahams: to minimize boilerplate code in traditional extension modules by
inferring type information using compile-time introspection.
.. _Boost.Python: http://www.boost.org/doc/libs/release/libs/python/doc/index.html
The main issue with Boost.Python—and the reason for creating such a similar
project—is Boost. Boost is an enormously large and complex suite of utility
libraries that works with almost every C++ compiler in existence. This
compatibility has its cost: arcane template tricks and workarounds are
necessary to support the oldest and buggiest of compiler specimens. Now that
C++11-compatible compilers are widely available, this heavy machinery has
become an excessively large and unnecessary dependency.
Think of this library as a tiny self-contained version of Boost.Python with
everything stripped away that isn't relevant for binding generation. Without
comments, the core header files only require ~4K lines of code and depend on
Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
compact implementation was possible thanks to some of the new C++11 language
features (specifically: tuples, lambda functions and variadic templates). Since
its creation, this library has grown beyond Boost.Python in many ways, leading
to dramatically simpler binding code in many common situations.
Core features
*************
The following core C++ features can be mapped to Python:
- Functions accepting and returning custom data structures per value, reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended in Python
Goodies
*******
In addition to the core functionality, pybind11 provides some extra goodies:
- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an
implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured variables. The
lambda capture data is stored inside the resulting Python function object.
- pybind11 uses C++11 move constructors and move assignment operators whenever
possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
Python's buffer protocol. This is handy e.g. for fast conversion between
C++ matrix classes like Eigen and NumPy without expensive copy operations.
- pybind11 can automatically vectorize functions so that they are transparently
applied to all entries of one or more NumPy array arguments.
- Python's slice-based access and assignment operations can be supported with
just a few lines of code.
- Everything is contained in just a few header files; there is no need to link
against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11 conversion
of `PyRosetta`_, an enormous Boost.Python binding project, reported a binary
size reduction of **5.4x** and compile time reduction by **5.8x**.
- Function signatures are precomputed at compile time (using ``constexpr``),
leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled similar to
regular Python objects.
.. _PyRosetta: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
Supported compilers
*******************
1. Clang/LLVM (any non-ancient version with C++11 support)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 or newer
4. Intel C++ compiler v17 or newer (v16 with pybind11 v2.0 and v15 with pybind11 v2.0 and a `workaround <https://github.com/pybind/pybind11/issues/276>`_ )
Functions
#########
Before proceeding with this section, make sure that you are already familiar
with the basics of binding functions and classes, as explained in :doc:`/basics`
and :doc:`/classes`. The following guide is applicable to both free and member
functions, i.e. *methods* in Python.
.. _return_value_policies:
Return value policies
=====================
Python and C++ use fundamentally different ways of managing the memory and
lifetime of objects managed by them. This can lead to issues when creating
bindings for functions that return a non-trivial type. Just by looking at the
type information, it is not clear whether Python should take charge of the
returned value and eventually free its resources, or if this is handled on the
C++ side. For this reason, pybind11 provides several *return value policy*
annotations that can be passed to the :func:`module::def` and
:func:`class_::def` functions. The default policy is
:enum:`return_value_policy::automatic`.
Return value policies are tricky, and it's very important to get them right.
Just to illustrate what can go wrong, consider the following simple example:
.. code-block:: cpp
/* Function declaration */
Data *get_data() { return _data; /* (pointer to a static data structure) */ }
...
/* Binding code */
m.def("get_data", &get_data); // <-- KABOOM, will cause crash when called from Python
What's going on here? When ``get_data()`` is called from Python, the return
value (a native C++ type) must be wrapped to turn it into a usable Python type.
In this case, the default return value policy (:enum:`return_value_policy::automatic`)
causes pybind11 to assume ownership of the static ``_data`` instance.
When Python's garbage collector eventually deletes the Python
wrapper, pybind11 will also attempt to delete the C++ instance (via ``operator
delete()``) due to the implied ownership. At this point, the entire application
will come crashing down, though errors could also be more subtle and involve
silent data corruption.
In the above example, the policy :enum:`return_value_policy::reference` should have
been specified so that the global data instance is only *referenced* without any
implied transfer of ownership, i.e.:
.. code-block:: cpp
m.def("get_data", &get_data, return_value_policy::reference);
On the other hand, this is not the right policy for many other situations,
where ignoring ownership could lead to resource leaks.
As a developer using pybind11, it's important to be familiar with the different
return value policies, including which situation calls for which one of them.
The following table provides an overview of available policies:
.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|
+--------------------------------------------------+----------------------------------------------------------------------------+
| Return value policy | Description |
+==================================================+============================================================================+
| :enum:`return_value_policy::take_ownership` | Reference an existing object (i.e. do not create a new copy) and take |
| | ownership. Python will call the destructor and delete operator when the |
| | object's reference count reaches zero. Undefined behavior ensues when the |
| | C++ side does the same, or when the data was not dynamically allocated. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::copy` | Create a new copy of the returned object, which will be owned by Python. |
| | This policy is comparably safe because the lifetimes of the two instances |
| | are decoupled. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::move` | Use ``std::move`` to move the return value contents into a new instance |
| | that will be owned by Python. This policy is comparably safe because the |
| | lifetimes of the two instances (move source and destination) are decoupled.|
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::reference` | Reference an existing object, but do not take ownership. The C++ side is |
| | responsible for managing the object's lifetime and deallocating it when |
| | it is no longer used. Warning: undefined behavior will ensue when the C++ |
| | side deletes an object that is still referenced and used by Python. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::reference_internal` | Indicates that the lifetime of the return value is tied to the lifetime |
| | of a parent object, namely the implicit ``this``, or ``self`` argument of |
| | the called method or property. Internally, this policy works just like |
| | :enum:`return_value_policy::reference` but additionally applies a |
| | ``keep_alive<0, 1>`` *call policy* (described in the next section) that |
| | prevents the parent object from being garbage collected as long as the |
| | return value is referenced by Python. This is the default policy for |
| | property getters created via ``def_property``, ``def_readwrite``, etc. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic` | **Default policy.** This policy falls back to the policy |
| | :enum:`return_value_policy::take_ownership` when the return value is a |
| | pointer. Otherwise, it uses :enum:`return_value_policy::move` or |
| | :enum:`return_value_policy::copy` for rvalue and lvalue references, |
| | respectively. See above for a description of what all of these different |
| | policies do. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic_reference` | As above, but use policy :enum:`return_value_policy::reference` when the |
| | return value is a pointer. This is the default conversion policy for |
| | function arguments when calling Python functions manually from C++ code |
| | (i.e. via handle::operator()). You probably won't need to use this. |
+--------------------------------------------------+----------------------------------------------------------------------------+
Return value policies can also be applied to properties:
.. code-block:: cpp
class_<MyClass>(m, "MyClass")
.def_property("data", &MyClass::getData, &MyClass::setData,
py::return_value_policy::copy);
Technically, the code above applies the policy to both the getter and the
setter function, however, the setter doesn't really care about *return*
value policies which makes this a convenient terse syntax. Alternatively,
targeted arguments can be passed through the :class:`cpp_function` constructor:
.. code-block:: cpp
class_<MyClass>(m, "MyClass")
.def_property("data"
py::cpp_function(&MyClass::getData, py::return_value_policy::copy),
py::cpp_function(&MyClass::setData)
);
.. warning::
Code with invalid return value policies might access uninitialized memory or
free data structures multiple times, which can lead to hard-to-debug
non-determinism and segmentation faults, hence it is worth spending the
time to understand all the different options in the table above.
.. note::
One important aspect of the above policies is that they only apply to
instances which pybind11 has *not* seen before, in which case the policy
clarifies essential questions about the return value's lifetime and
ownership. When pybind11 knows the instance already (as identified by its
type and address in memory), it will return the existing Python object
wrapper rather than creating a new copy.
.. note::
The next section on :ref:`call_policies` discusses *call policies* that can be
specified *in addition* to a return value policy from the list above. Call
policies indicate reference relationships that can involve both return values
and parameters of functions.
.. note::
As an alternative to elaborate call policies and lifetime management logic,
consider using smart pointers (see the section on :ref:`smart_pointers` for
details). Smart pointers can tell whether an object is still referenced from
C++ or Python, which generally eliminates the kinds of inconsistencies that
can lead to crashes or undefined behavior. For functions returning smart
pointers, it is not necessary to specify a return value policy.
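As a brief sketch of that alternative (``load_data`` is a hypothetical function),
a smart-pointer return value can be bound without any policy annotation:
.. code-block:: cpp
    std::shared_ptr<Data> load_data();  // hypothetical function returning a shared_ptr
    m.def("load_data", &load_data);     // no return value policy needed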
.. _call_policies:
Additional call policies
========================
In addition to the above return value policies, further *call policies* can be
specified to indicate dependencies between parameters or ensure a certain state
for the function call.
Keep alive
----------
In general, this policy is required when the C++ object is any kind of container
and another object is being added to the container. ``keep_alive<Nurse, Patient>``
indicates that the argument with index ``Patient`` should be kept alive at least
until the argument with index ``Nurse`` is freed by the garbage collector. Argument
indices start at one, while zero refers to the return value. For methods, index
``1`` refers to the implicit ``this`` pointer, while regular arguments begin at
index ``2``. Arbitrarily many call policies can be specified. When a ``Nurse``
with value ``None`` is detected at runtime, the call policy does nothing.
When the nurse is not a pybind11-registered type, the implementation internally
relies on the ability to create a *weak reference* to the nurse object. When
the nurse object is not a pybind11-registered type and does not support weak
references, an exception will be thrown.
Consider the following example: here, the binding code for a list append
operation ties the lifetime of the newly added element to the underlying
container:
.. code-block:: cpp
py::class_<List>(m, "List")
.def("append", &List::append, py::keep_alive<1, 2>());
For consistency, the argument indexing is identical for constructors. Index
``1`` still refers to the implicit ``this`` pointer, i.e. the object which is
being constructed. Index ``0`` refers to the return type which is presumed to
be ``void`` when a constructor is viewed like a function. The following example
ties the lifetime of the constructor element to the constructed object:
.. code-block:: cpp
py::class_<Nurse>(m, "Nurse")
.def(py::init<Patient &>(), py::keep_alive<1, 2>());
.. note::
``keep_alive`` is analogous to the ``with_custodian_and_ward`` (if Nurse,
Patient != 0) and ``with_custodian_and_ward_postcall`` (if Nurse/Patient ==
0) policies from Boost.Python.
Call guard
----------
The ``call_guard<T>`` policy allows any scope guard type ``T`` to be placed
around the function call. For example, this definition:
.. code-block:: cpp
m.def("foo", foo, py::call_guard<T>());
is equivalent to the following pseudocode:
.. code-block:: cpp
m.def("foo", [](args...) {
T scope_guard;
return foo(args...); // forwarded arguments
});
The only requirement is that ``T`` is default-constructible, but otherwise any
scope guard will work. This is very useful in combination with `gil_scoped_release`.
See :ref:`gil`.
Multiple guards can also be specified as ``py::call_guard<T1, T2, T3...>``. The
constructor order is left to right and destruction happens in reverse.
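For illustration, a sketch combining the GIL release guard with a hypothetical
user-defined guard type:
.. code-block:: cpp
    // `compute` and `ScopedTimer` are hypothetical; shown only to illustrate ordering
    m.def("compute", &compute,
          py::call_guard<py::gil_scoped_release, ScopedTimer>());
    // gil_scoped_release is constructed first (left to right), ScopedTimer second;
    // destruction runs in reverse, so the GIL is re-acquired last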
.. seealso::
The file :file:`tests/test_call_policies.cpp` contains a complete example
that demonstrates using `keep_alive` and `call_guard` in more detail.
.. _python_objects_as_args:
Python objects as arguments
===========================
pybind11 exposes all major Python types using thin C++ wrapper classes. These
wrapper classes can also be used as parameters of functions in bindings, which
makes it possible to directly work with native Python types on the C++ side.
For instance, the following statement iterates over a Python ``dict``:
.. code-block:: cpp
void print_dict(py::dict dict) {
/* Easily interact with Python types */
for (auto item : dict)
std::cout << "key=" << std::string(py::str(item.first)) << ", "
<< "value=" << std::string(py::str(item.second)) << std::endl;
}
It can be exported:
.. code-block:: cpp
m.def("print_dict", &print_dict);
And used in Python as usual:
.. code-block:: pycon
>>> print_dict({'foo': 123, 'bar': 'hello'})
key=foo, value=123
key=bar, value=hello
For more information on using Python objects in C++, see :doc:`/advanced/pycpp/index`.
Accepting \*args and \*\*kwargs
===============================
Python provides a useful mechanism to define functions that accept arbitrary
numbers of arguments and keyword arguments:
.. code-block:: python
def generic(*args, **kwargs):
... # do something with args and kwargs
Such functions can also be created using pybind11:
.. code-block:: cpp
void generic(py::args args, py::kwargs kwargs) {
/// .. do something with args
if (kwargs)
/// .. do something with kwargs
}
/// Binding code
m.def("generic", &generic);
The class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives
from ``py::dict``.
You may also use just one or the other, and may combine these with other
arguments as long as the ``py::args`` and ``py::kwargs`` arguments are the last
arguments accepted by the function.
Please refer to the other examples for details on how to iterate over these,
and on how to cast their entries into C++ objects. A demonstration is also
available in ``tests/test_kwargs_and_defaults.cpp``.
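As a small sketch of the combined form, a regular parameter can precede the
catch-all arguments:
.. code-block:: cpp
    void mixed(int required, py::args extra, py::kwargs named) {
        // use `required`, then iterate over `extra` and `named` as needed
    }
    m.def("mixed", &mixed);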
.. note::
When combining \*args or \*\*kwargs with :ref:`keyword_args` you should
*not* include ``py::arg`` tags for the ``py::args`` and ``py::kwargs``
arguments.
Default arguments revisited
===========================
The section on :ref:`default_args` previously discussed basic usage of default
arguments using pybind11. One noteworthy aspect of their implementation is that
default arguments are converted to Python objects right at declaration time.
Consider the following example:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg("arg") = SomeType(123));
In this case, pybind11 must already be set up to deal with values of the type
``SomeType`` (via a prior instantiation of ``py::class_<SomeType>``), or an
exception will be thrown.
Another aspect worth highlighting is that the "preview" of the default argument
in the function signature is generated using the object's ``__repr__`` method.
If not available, the signature may not be very helpful, e.g.:
.. code-block:: pycon
FUNCTIONS
...
| myFunction(...)
| Signature : (MyClass, arg : SomeType = <SomeType object at 0x101b7b080>) -> NoneType
...
The first way of addressing this is by defining ``SomeType.__repr__``.
Alternatively, it is possible to specify the human-readable preview of the
default argument manually using the ``arg_v`` notation:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg_v("arg", SomeType(123), "SomeType(123)"));
Sometimes it may be necessary to pass a null pointer value as a default
argument. In this case, remember to cast it to the underlying type in question,
like so:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg("arg") = (SomeType *) nullptr);
.. _nonconverting_arguments:
Non-converting arguments
========================
Certain argument types may support conversion from one type to another. Some
examples of conversions are:
* :ref:`implicit_conversions` declared using ``py::implicitly_convertible<A,B>()``
* Calling a method accepting a double with an integer argument
* Calling a ``std::complex<float>`` argument with a non-complex python type
(for example, with a float). (Requires the optional ``pybind11/complex.h``
header).
* Calling a function taking an Eigen matrix reference with a numpy array of the
wrong type or of an incompatible data layout. (Requires the optional
``pybind11/eigen.h`` header).
This behaviour is sometimes undesirable: the binding code may prefer to raise
an error rather than convert the argument. This behaviour can be obtained
through ``py::arg`` by calling the ``.noconvert()`` method of the ``py::arg``
object, such as:
.. code-block:: cpp
m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert());
m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f"));
Attempting to call the second function (the one without ``.noconvert()``) with
an integer will succeed, but attempting to call the ``.noconvert()`` version
will fail with a ``TypeError``:
.. code-block:: pycon
>>> floats_preferred(4)
2.0
>>> floats_only(4)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: floats_only(): incompatible function arguments. The following argument types are supported:
1. (f: float) -> float
Invoked with: 4
You may, of course, combine this with the :var:`_a` shorthand notation (see
:ref:`keyword_args`) and/or :ref:`default_args`. It is also permitted to omit
the argument name by using the ``py::arg()`` constructor without an argument
name, i.e. by specifying ``py::arg().noconvert()``.
.. note::
When specifying ``py::arg`` options it is necessary to provide the same
number of options as the bound function has arguments. Thus if you want to
enable no-convert behaviour for just one of several arguments, you will
need to specify a ``py::arg()`` annotation for each argument with the
no-convert argument modified to ``py::arg().noconvert()``.
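A minimal sketch of that, disabling conversion only for the first argument:
.. code-block:: cpp
    m.def("scale", [](double factor, double value) { return factor * value; },
          py::arg("factor").noconvert(), py::arg("value"));
    // scale(2.0, 3) succeeds: only `value` is converted from int to float
    // scale(2, 3.0) raises TypeError: `factor` refuses the int -> float conversion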
.. _none_arguments:
Allowing/Prohibiting None arguments
===================================
When a C++ type registered with :class:`py::class_` is passed as an argument to
a function taking the instance as pointer or shared holder (e.g. ``shared_ptr``
or a custom, copyable holder as described in :ref:`smart_pointers`), pybind
allows ``None`` to be passed from Python which results in calling the C++
function with ``nullptr`` (or an empty holder) for the argument.
To explicitly enable or disable this behaviour, use the
``.none`` method of the :class:`py::arg` object:
.. code-block:: cpp
py::class_<Dog>(m, "Dog").def(py::init<>());
py::class_<Cat>(m, "Cat").def(py::init<>());
m.def("bark", [](Dog *dog) -> std::string {
if (dog) return "woof!"; /* Called with a Dog instance */
else return "(no dog)"; /* Called with None, dog == nullptr */
}, py::arg("dog").none(true));
m.def("meow", [](Cat *cat) -> std::string {
// Can't be called with None argument
return "meow";
}, py::arg("cat").none(false));
With the above, the Python call ``bark(None)`` will return the string ``"(no
dog)"``, while attempting to call ``meow(None)`` will raise a ``TypeError``:
.. code-block:: pycon
>>> from animals import Dog, Cat, bark, meow
>>> bark(Dog())
'woof!'
>>> meow(Cat())
'meow'
>>> bark(None)
'(no dog)'
>>> meow(None)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: meow(): incompatible function arguments. The following argument types are supported:
1. (cat: animals.Cat) -> str
Invoked with: None
The default behaviour when the tag is unspecified is to allow ``None``.
.. note::
Even when ``.none(true)`` is specified for an argument, ``None`` will be converted to a
``nullptr`` *only* for custom and :ref:`opaque <opaque>` types. Pointers to built-in types
(``double *``, ``int *``, ...) and STL types (``std::vector<T> *``, ...; if ``pybind11/stl.h``
is included) are copied when converted to C++ (see :doc:`/advanced/cast/overview`) and will
not allow ``None`` as an argument. To pass an optional argument of one of these
copied types, consider using ``std::optional<T>``.
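A sketch of that approach (assuming a C++17 compiler and that ``pybind11/stl.h``
is included, which provides the ``std::optional`` caster):
.. code-block:: cpp
    #include <pybind11/stl.h>  // enables the std::optional<> caster (C++17)
    m.def("maybe_scale", [](std::optional<double> factor) {
        return factor ? *factor : 1.0;  // passing None yields an empty optional
    });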
Overload resolution order
=========================
When a function or method with multiple overloads is called from Python,
pybind11 determines which overload to call in two passes. The first pass
attempts to call each overload without allowing argument conversion (as if
every argument had been specified as ``py::arg().noconvert()`` as described
above).
If no overload succeeds in the no-conversion first pass, a second pass is
attempted in which argument conversion is allowed (except where prohibited via
an explicit ``py::arg().noconvert()`` attribute in the function definition).
If the second pass also fails a ``TypeError`` is raised.
Within each pass, overloads are tried in the order they were registered with
pybind11.
What this means in practice is that pybind11 will prefer any overload that does
not require conversion of arguments to an overload that does, but otherwise prefers
earlier-defined overloads to later-defined ones.
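For instance, in the following sketch both calls succeed in the first pass
because each argument matches one overload exactly:
.. code-block:: cpp
    m.def("describe", [](int)    { return "int"; });
    m.def("describe", [](double) { return "double"; });
    // describe(7) -> "int", describe(7.5) -> "double"; registration order only
    // becomes relevant once argument conversion is required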
.. note::
pybind11 does *not* further prioritize based on the number/pattern of
overloaded arguments. That is, pybind11 does not prioritize a function
requiring one conversion over one requiring three, but only prioritizes
overloads requiring no conversion at all to overloads that require
conversion of at least one argument.
Smart pointers
##############
std::unique_ptr
===============
Given a class ``Example`` with Python bindings, it's possible to return
instances wrapped in C++11 unique pointers, like so
.. code-block:: cpp
std::unique_ptr<Example> create_example() { return std::unique_ptr<Example>(new Example()); }
.. code-block:: cpp
m.def("create_example", &create_example);
In other words, there is nothing special that needs to be done. While returning
unique pointers in this way is allowed, it is *illegal* to use them as function
arguments. For instance, the following function signature cannot be processed
by pybind11.
.. code-block:: cpp
void do_something_with_example(std::unique_ptr<Example> ex) { ... }
The above signature would imply that Python needs to give up ownership of an
object that is passed to this function, which is generally not possible (for
instance, the object might be referenced elsewhere).
std::shared_ptr
===============
The binding generator for classes, :class:`class_`, can be passed a template
type that denotes a special *holder* type that is used to manage references to
the object. If no such holder type template argument is given, the default for
a type named ``Type`` is ``std::unique_ptr<Type>``, which means that the object
is deallocated when Python's reference count goes to zero.
It is possible to switch to other types of reference counting wrappers or smart
pointers, which is useful in codebases that rely on them. For instance, the
following snippet causes ``std::shared_ptr`` to be used instead.
.. code-block:: cpp
py::class_<Example, std::shared_ptr<Example> /* <- holder type */> obj(m, "Example");
Note that any particular class can only be associated with a single holder type.
One potential stumbling block when using holder types is that they need to be
applied consistently. Can you guess what's broken about the following binding
code?
.. code-block:: cpp
class Child { };
class Parent {
public:
Parent() : child(std::make_shared<Child>()) { }
Child *get_child() { return child.get(); } /* Hint: ** DON'T DO THIS ** */
private:
std::shared_ptr<Child> child;
};
PYBIND11_MODULE(example, m) {
py::class_<Child, std::shared_ptr<Child>>(m, "Child");
py::class_<Parent, std::shared_ptr<Parent>>(m, "Parent")
.def(py::init<>())
.def("get_child", &Parent::get_child);
}
The following Python code will cause undefined behavior (and likely a
segmentation fault).
.. code-block:: python
from example import Parent
print(Parent().get_child())
The problem is that ``Parent::get_child()`` returns a pointer to an instance of
``Child``, but the fact that this instance is already managed by
``std::shared_ptr<...>`` is lost when passing raw pointers. In this case,
pybind11 will create a second independent ``std::shared_ptr<...>`` that also
claims ownership of the pointer. In the end, the object will be freed **twice**
since these shared pointers have no way of knowing about each other.
There are two ways to resolve this issue:
1. For types that are managed by a smart pointer class, never use raw pointers
in function arguments or return values. In other words: always consistently
wrap pointers into their designated holder types (such as
``std::shared_ptr<...>``). In this case, the signature of ``get_child()``
should be modified as follows:
.. code-block:: cpp
std::shared_ptr<Child> get_child() { return child; }
2. Adjust the definition of ``Child`` by specifying
``std::enable_shared_from_this<T>`` (see cppreference_ for details) as a
base class. This adds a small bit of information to ``Child`` that allows
pybind11 to realize that there is already an existing
``std::shared_ptr<...>`` and communicate with it. In this case, the
declaration of ``Child`` should look as follows:
.. _cppreference: http://en.cppreference.com/w/cpp/memory/enable_shared_from_this
.. code-block:: cpp
class Child : public std::enable_shared_from_this<Child> { };
.. _smart_pointers:
Custom smart pointers
=====================
pybind11 supports ``std::unique_ptr`` and ``std::shared_ptr`` right out of the
box. For any other custom smart pointer, transparent conversions can be enabled
using a macro invocation similar to the following. It must be declared at the
top namespace level before any binding code:
.. code-block:: cpp
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);
The first argument of :func:`PYBIND11_DECLARE_HOLDER_TYPE` should be a
placeholder name that is used as a template parameter of the second argument.
Thus, feel free to use any identifier, but use it consistently on both sides;
also, don't use the name of a type that already exists in your codebase.
The macro also accepts a third optional boolean parameter that is set to false
by default. Specify
.. code-block:: cpp
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true);
if ``SmartPtr<T>`` can always be initialized from a ``T*`` pointer without the
risk of inconsistencies (such as multiple independent ``SmartPtr`` instances
believing that they are the sole owner of the ``T*`` pointer). A common
situation where ``true`` should be passed is when the ``T`` instances use
*intrusive* reference counting.
Please take a look at the :ref:`macro_notes` before using this feature.
By default, pybind11 assumes that your custom smart pointer has a standard
interface, i.e. provides a ``.get()`` member function to access the underlying
raw pointer. If this is not the case, pybind11's ``holder_helper`` must be
specialized:
.. code-block:: cpp
// Always needed for custom holder types
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);
// Only needed if the type's `.get()` goes by another name
namespace pybind11 { namespace detail {
template <typename T>
struct holder_helper<SmartPtr<T>> { // <-- specialization
static const T *get(const SmartPtr<T> &p) { return p.getPointer(); }
};
}}
The above specialization informs pybind11 that the custom ``SmartPtr`` class
provides ``.get()`` functionality via ``.getPointer()``.
.. seealso::
The file :file:`tests/test_smart_ptr.cpp` contains a complete example
that demonstrates how to work with custom reference-counting holder types
in more detail.
Exceptions
##########
Built-in exception translation
==============================
When C++ code invoked from Python throws an ``std::exception``, it is
automatically converted into a Python ``Exception``. pybind11 defines multiple
special exception classes that will map to different types of Python
exceptions:
.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|
+--------------------------------------+--------------------------------------+
| C++ exception type | Python exception type |
+======================================+======================================+
| :class:`std::exception` | ``RuntimeError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::bad_alloc` | ``MemoryError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::domain_error` | ``ValueError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::invalid_argument` | ``ValueError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::length_error` | ``ValueError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::out_of_range` | ``IndexError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::range_error` | ``ValueError`` |
+--------------------------------------+--------------------------------------+
| :class:`std::overflow_error` | ``OverflowError`` |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::stop_iteration` | ``StopIteration`` (used to implement |
| | custom iterators) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::index_error` | ``IndexError`` (used to indicate out |
| | of bounds access in ``__getitem__``, |
| | ``__setitem__``, etc.) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::value_error` | ``ValueError`` (used to indicate |
| | wrong value passed in |
| | ``container.remove(...)``) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::key_error` | ``KeyError`` (used to indicate out |
| | of bounds access in ``__getitem__``, |
| | ``__setitem__`` in dict-like |
| | objects, etc.) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::error_already_set` | Indicates that the Python exception |
| | flag has already been set via Python |
| | API calls from C++ code; this C++ |
| | exception is used to propagate such |
| | a Python exception back to Python. |
+--------------------------------------+--------------------------------------+
When a Python function invoked from C++ throws an exception, it is converted
into a C++ exception of type :class:`error_already_set` whose string payload
contains a textual summary.
There is also a special exception :class:`cast_error` that is thrown by
:func:`handle::call` when the input arguments cannot be converted to Python
objects.
Registering custom translators
==============================
If the default exception conversion policy described above is insufficient,
pybind11 also provides support for registering custom exception translators.
To register a simple exception conversion that translates a C++ exception into
a new Python exception using the C++ exception's ``what()`` method, a helper
function is available:
.. code-block:: cpp
py::register_exception<CppExp>(module, "PyExp");
This call creates a Python exception class with the name ``PyExp`` in the given
module and automatically converts any encountered exceptions of type ``CppExp``
into Python exceptions of type ``PyExp``.
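A minimal sketch of the round trip, using a hypothetical ``CppExp`` type derived
from ``std::exception``:
.. code-block:: cpp
    class CppExp : public std::exception {  // hypothetical exception type
    public:
        explicit CppExp(std::string msg) : msg_(std::move(msg)) { }
        const char *what() const noexcept override { return msg_.c_str(); }
    private:
        std::string msg_;
    };
    py::register_exception<CppExp>(m, "PyExp");
    m.def("raise_it", []() { throw CppExp("something went wrong"); });
    // In Python, example.raise_it() now raises example.PyExp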
When more advanced exception translation is needed, the function
``py::register_exception_translator(translator)`` can be used to register
functions that can translate arbitrary exception types (and which may include
additional logic to do so). The function takes a stateless callable (e.g. a
function pointer or a lambda function without captured variables) with the call
signature ``void(std::exception_ptr)``.
When a C++ exception is thrown, the registered exception translators are tried
in reverse order of registration (i.e. the last registered translator gets the
first shot at handling the exception).
Inside the translator, ``std::rethrow_exception`` should be used within
a try block to re-throw the exception. One or more catch clauses to catch
the appropriate exceptions should then be used with each clause using
``PyErr_SetString`` to set a Python exception or ``ex(string)`` to set
the Python exception to a custom exception type (see below).
To declare a custom Python exception type, declare a ``py::exception`` variable
and use this in the associated exception translator (note: it is often useful
to make this a static declaration when using it inside a lambda expression
without requiring capturing).
The following example demonstrates this for two hypothetical exception classes,
``MyCustomException`` and ``OtherException``: the first is translated to a
custom Python exception ``MyCustomError``, while the second is translated to a
standard Python ``RuntimeError``:
.. code-block:: cpp
static py::exception<MyCustomException> exc(m, "MyCustomError");
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) std::rethrow_exception(p);
} catch (const MyCustomException &e) {
exc(e.what());
} catch (const OtherException &e) {
PyErr_SetString(PyExc_RuntimeError, e.what());
}
});
Multiple exceptions can be handled by a single translator, as shown in the
example above. If the exception is not caught by the current translator, the
previously registered one gets a chance.
If none of the registered exception translators is able to handle the
exception, it is handled by the default converter as described in the previous
section.
.. seealso::
The file :file:`tests/test_exceptions.cpp` contains examples
of various custom exception translators and custom exception types.
.. note::
You must call either ``PyErr_SetString`` or a custom exception's call
operator (``exc(string)``) for every exception caught in a custom exception
translator. Failure to do so will cause Python to crash with ``SystemError:
error return without exception set``.
Exceptions that you do not plan to handle should simply not be caught, or
may be explicitly (re-)thrown to delegate them to the other,
previously registered exception translators.
Classes
#######
This section presents advanced binding code for classes and it is assumed
that you are already familiar with the basics from :doc:`/classes`.
.. _overriding_virtuals:
Overriding virtual functions in Python
======================================
Suppose that a C++ class or interface has a virtual function that we'd like
to override from within Python (we'll focus on the class ``Animal``; ``Dog`` is
given as a specific example of how one would do this with traditional C++
code).
.. code-block:: cpp
class Animal {
public:
virtual ~Animal() { }
virtual std::string go(int n_times) = 0;
};
class Dog : public Animal {
public:
std::string go(int n_times) override {
std::string result;
for (int i=0; i<n_times; ++i)
result += "woof! ";
return result;
}
};
Let's also suppose that we are given a plain function which calls the
function ``go()`` on an arbitrary ``Animal`` instance.
.. code-block:: cpp
std::string call_go(Animal *animal) {
return animal->go(3);
}
Normally, the binding code for these classes would look as follows:
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
py::class_<Animal>(m, "Animal")
.def("go", &Animal::go);
py::class_<Dog, Animal>(m, "Dog")
.def(py::init<>());
m.def("call_go", &call_go);
}
However, these bindings are impossible to extend: ``Animal`` is not
constructible, and we clearly require some kind of "trampoline" that
redirects virtual calls back to Python.
Defining a new type of ``Animal`` from within Python is possible but requires a
helper class that is defined as follows:
.. code-block:: cpp
class PyAnimal : public Animal {
public:
/* Inherit the constructors */
using Animal::Animal;
/* Trampoline (need one for each virtual function) */
std::string go(int n_times) override {
PYBIND11_OVERLOAD_PURE(
std::string, /* Return type */
Animal, /* Parent class */
go, /* Name of function in C++ (must match Python name) */
n_times /* Argument(s) */
);
}
};
The macro :c:macro:`PYBIND11_OVERLOAD_PURE` should be used for pure virtual
functions, and :c:macro:`PYBIND11_OVERLOAD` should be used for functions which have
a default implementation. There are also two alternate macros
:c:macro:`PYBIND11_OVERLOAD_PURE_NAME` and :c:macro:`PYBIND11_OVERLOAD_NAME` which
take a string-valued name argument between the *Parent class* and *Name of the
function* slots, which defines the name of the function in Python. This is
required when the C++ and Python versions of the function have different names,
e.g. ``operator()`` vs ``__call__``.
The binding code also needs a few minor adaptations (highlighted):
.. code-block:: cpp
:emphasize-lines: 2,3
PYBIND11_MODULE(example, m) {
py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal")
.def(py::init<>())
.def("go", &Animal::go);
py::class_<Dog, Animal>(m, "Dog")
.def(py::init<>());
m.def("call_go", &call_go);
}
Importantly, pybind11 is made aware of the trampoline helper class by
specifying it as an extra template argument to :class:`class_`. (This can also
be combined with other template arguments such as a custom holder type; the
order of template types does not matter). Following this, we are able to
define a constructor as usual.
Bindings should be made against the actual class, not the trampoline helper class.
.. code-block:: cpp
:emphasize-lines: 3
py::class_<Animal, PyAnimal /* <--- trampoline*/>(m, "Animal");
.def(py::init<>())
.def("go", &PyAnimal::go); /* <--- THIS IS WRONG, use &Animal::go */
Note, however, that the above is sufficient for allowing python classes to
extend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the
necessary steps required to provide proper overload support for inherited
classes.
The Python session below shows how to override ``Animal::go`` and invoke it via
a virtual method call.
.. code-block:: pycon
>>> from example import *
>>> d = Dog()
>>> call_go(d)
u'woof! woof! woof! '
>>> class Cat(Animal):
... def go(self, n_times):
... return "meow! " * n_times
...
>>> c = Cat()
>>> call_go(c)
u'meow! meow! meow! '
If you are defining a custom constructor in a derived Python class, you *must*
ensure that you explicitly call the bound C++ constructor using ``__init__``,
*regardless* of whether it is a default constructor or not. Otherwise, the
memory for the C++ portion of the instance will be left uninitialized, which
will generally leave the C++ instance in an invalid state and cause undefined
behavior if the C++ instance is subsequently used.
Here is an example:
.. code-block:: python
class Dachshund(Dog):
def __init__(self, name):
Dog.__init__(self) # Without this, undefined behavior may occur if the C++ portions are referenced.
self.name = name
def bark(self):
return "yap!"
Note that a direct ``__init__`` constructor *should be called*, and ``super()``
should not be used. For simple cases of linear inheritance, ``super()``
may work, but once you begin mixing Python and C++ multiple inheritance,
things will fall apart due to differences between Python's MRO and C++'s
mechanisms.
Please take a look at the :ref:`macro_notes` before using this feature.
.. note::
When the overridden type returns a reference or pointer to a type that
pybind11 converts from Python (for example, numeric values, std::string,
and other built-in value-converting types), there are some limitations to
be aware of:
- because in these cases there is no C++ variable to reference (the value
is stored in the referenced Python variable), pybind11 provides one in
the PYBIND11_OVERLOAD macros (when needed) with static storage duration.
Note that this means that invoking the overloaded method on *any*
instance will change the referenced value stored in *all* instances of
that type.
- Attempts to modify a non-const reference will not have the desired
effect: it will change only the static cache variable, but this change
will not propagate to the underlying Python instance, and the change will be
replaced the next time the overload is invoked.
.. seealso::
The file :file:`tests/test_virtual_functions.cpp` contains a complete
example that demonstrates how to override virtual functions using pybind11
in more detail.
.. _virtual_and_inheritance:
Combining virtual functions and inheritance
===========================================
When combining virtual methods with inheritance, you need to be sure to provide
an override for each method for which you want to allow overrides from derived
python classes. For example, suppose we extend the above ``Animal``/``Dog``
example as follows:
.. code-block:: cpp
class Animal {
public:
virtual std::string go(int n_times) = 0;
virtual std::string name() { return "unknown"; }
};
class Dog : public Animal {
public:
std::string go(int n_times) override {
std::string result;
for (int i=0; i<n_times; ++i)
result += bark() + " ";
return result;
}
virtual std::string bark() { return "woof!"; }
};
then the trampoline class for ``Animal`` must, as described in the previous
section, override ``go()`` and ``name()``, but in order to allow python code to
inherit properly from ``Dog``, we also need a trampoline class for ``Dog`` that
overrides both the added ``bark()`` method *and* the ``go()`` and ``name()``
methods inherited from ``Animal`` (even though ``Dog`` doesn't directly
override the ``name()`` method):
.. code-block:: cpp
class PyAnimal : public Animal {
public:
using Animal::Animal; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Animal, go, n_times); }
std::string name() override { PYBIND11_OVERLOAD(std::string, Animal, name, ); }
};
class PyDog : public Dog {
public:
using Dog::Dog; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, Dog, go, n_times); }
std::string name() override { PYBIND11_OVERLOAD(std::string, Dog, name, ); }
std::string bark() override { PYBIND11_OVERLOAD(std::string, Dog, bark, ); }
};
.. note::
Note the trailing commas in the ``PYBIND11_OVERLOAD`` calls to ``name()``
and ``bark()``. These are needed to portably implement a trampoline for a
function that does not take any arguments. For functions that take
a nonzero number of arguments, the trailing comma must be omitted.
A registered class derived from a pybind11-registered class with virtual
methods requires a similar trampoline class, *even if* it doesn't explicitly
declare or override any virtual methods itself:
.. code-block:: cpp
class Husky : public Dog {};
class PyHusky : public Husky {
public:
using Husky::Husky; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Husky, go, n_times); }
std::string name() override { PYBIND11_OVERLOAD(std::string, Husky, name, ); }
std::string bark() override { PYBIND11_OVERLOAD(std::string, Husky, bark, ); }
};
There is, however, a technique that can be used to avoid this duplication
(which can be especially helpful for a base class with several virtual
methods). The technique involves using template trampoline classes, as
follows:
.. code-block:: cpp
template <class AnimalBase = Animal> class PyAnimal : public AnimalBase {
public:
using AnimalBase::AnimalBase; // Inherit constructors
std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, AnimalBase, go, n_times); }
std::string name() override { PYBIND11_OVERLOAD(std::string, AnimalBase, name, ); }
};
template <class DogBase = Dog> class PyDog : public PyAnimal<DogBase> {
public:
using PyAnimal<DogBase>::PyAnimal; // Inherit constructors
// Override PyAnimal's pure virtual go() with a non-pure one:
std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, DogBase, go, n_times); }
std::string bark() override { PYBIND11_OVERLOAD(std::string, DogBase, bark, ); }
};
This technique has the advantage of requiring just one trampoline method to be
declared per virtual method and pure virtual method override. It does,
however, require the compiler to generate at least as many methods (and
possibly more, if both pure virtual and overridden pure virtual methods are
exposed, as above).
The classes are then registered with pybind11 using:
.. code-block:: cpp
py::class_<Animal, PyAnimal<>> animal(m, "Animal");
py::class_<Dog, PyDog<>> dog(m, "Dog");
py::class_<Husky, PyDog<Husky>> husky(m, "Husky");
// ... add animal, dog, husky definitions
Note that ``Husky`` did not require a dedicated trampoline template class at
all, since it neither declares any new virtual methods nor provides any pure
virtual method implementations.
With either the repeated-virtuals or templated trampoline methods in place, you
can now create a python class that inherits from ``Dog``:
.. code-block:: python
class ShihTzu(Dog):
def bark(self):
return "yip!"
.. seealso::
See the file :file:`tests/test_virtual_functions.cpp` for complete examples
using both the duplication and templated trampoline approaches.
.. _extended_aliases:
Extended trampoline class functionality
=======================================
.. _extended_class_functionality_forced_trampoline:
Forced trampoline class initialisation
--------------------------------------
The trampoline classes described in the previous sections are, by default, only
initialized when needed. More specifically, they are initialized when a python
class actually inherits from a registered type (instead of merely creating an
instance of the registered type), or when a registered constructor is only
valid for the trampoline class but not the registered class. This is primarily
for performance reasons: when the trampoline class is not needed for anything
except virtual method dispatching, not initializing the trampoline class
improves performance by avoiding needing to do a run-time check to see if the
inheriting python instance has an overloaded method.
Sometimes, however, it is useful to always initialize a trampoline class as an
intermediate class that does more than just handle virtual method dispatching.
For example, such a class might perform extra class initialization, extra
destruction operations, and might define new members and methods to enable a
more python-like interface to a class.
In order to tell pybind11 that it should *always* initialize the trampoline
class when creating new instances of a type, the class constructors should be
declared using ``py::init_alias<Args, ...>()`` instead of the usual
``py::init<Args, ...>()``. This forces construction via the trampoline class,
ensuring member initialization and (eventual) destruction.
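
A minimal sketch of forcing trampoline construction (the ``Example``/``PyExample``
names are made up for illustration):

.. code-block:: cpp

    class Example {
    public:
        virtual ~Example() = default;
        virtual int value() const { return 1; }
    };

    class PyExample : public Example {  // trampoline that also adds extra behaviour
    public:
        using Example::Example;
        int value() const override { PYBIND11_OVERLOAD(int, Example, value, ); }
    };

    py::class_<Example, PyExample>(m, "Example")
        // py::init_alias<>() instead of py::init<>(): even plain Example()
        // calls from Python construct a PyExample underneath.
        .def(py::init_alias<>());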
.. seealso::
See the file :file:`tests/test_virtual_functions.cpp` for complete examples
showing both normal and forced trampoline instantiation.
Different method signatures
---------------------------
The macros introduced in :ref:`overriding_virtuals` cover most of the standard
use cases when exposing C++ classes to Python. Sometimes it is hard or unwieldy
to create a direct one-to-one mapping between the arguments and method return
type.
An example would be when the C++ signature contains output arguments using
references (see also :ref:`faq_reference_arguments`). Another way of solving
this is to use the method body of the trampoline class to convert between the
C++ arguments and the inputs and return value of the Python method.
The main building block for doing so is :func:`get_overload`, a function that
retrieves a method implemented in Python from within the trampoline's
methods. Consider for example a C++ method which has the signature
``bool myMethod(int32_t& value)``, where the return indicates whether
something should be done with the ``value``. This can be made convenient on the
Python side by allowing the Python function to return ``None`` or an ``int``:
.. code-block:: cpp
bool MyClass::myMethod(int32_t& value)
{
pybind11::gil_scoped_acquire gil; // Acquire the GIL while in this scope.
// Try to look up the overloaded method on the Python side.
pybind11::function overload = pybind11::get_overload(this, "myMethod");
if (overload) { // method is found
auto obj = overload(value); // Call the Python function.
if (py::isinstance<py::int_>(obj)) { // check if it returned a Python integer type
value = obj.cast<int32_t>(); // Cast it and assign it to the value.
return true; // Return true; value should be used.
} else {
return false; // Python returned none, return false.
}
}
return false; // Alternatively return MyClass::myMethod(value);
}
.. _custom_constructors:
Custom constructors
===================
The syntax for binding constructors was previously introduced, but it only
works when a constructor of the appropriate arguments actually exists on the
C++ side. To extend this to more general cases, pybind11 makes it possible
to bind factory functions as constructors. For example, suppose you have a
class like this:
.. code-block:: cpp
class Example {
private:
Example(int); // private constructor
public:
// Factory function:
static Example create(int a) { return Example(a); }
};
py::class_<Example>(m, "Example")
.def(py::init(&Example::create));
While it is possible to create a straightforward binding of the static
``create`` method, it may sometimes be preferable to expose it as a constructor
on the Python side. This can be accomplished by calling ``.def(py::init(...))``
with the function reference returning the new instance passed as an argument.
It is also possible to use this approach to bind a function returning a new
instance by raw pointer or by the holder (e.g. ``std::unique_ptr``).
The following example shows the different approaches:
.. code-block:: cpp
class Example {
private:
Example(int); // private constructor
public:
// Factory function - returned by value:
static Example create(int a) { return Example(a); }
// These constructors are publicly callable:
Example(double);
Example(int, int);
Example(std::string);
};
py::class_<Example>(m, "Example")
// Bind the factory function as a constructor:
.def(py::init(&Example::create))
// Bind a lambda function returning a pointer wrapped in a holder:
.def(py::init([](std::string arg) {
return std::unique_ptr<Example>(new Example(arg));
}))
// Return a raw pointer:
.def(py::init([](int a, int b) { return new Example(a, b); }))
// You can mix the above with regular C++ constructor bindings as well:
.def(py::init<double>())
;
When the constructor is invoked from Python, pybind11 will call the factory
function and store the resulting C++ instance in the Python instance.
When combining factory functions used as constructors with :ref:`virtual function
trampolines <overriding_virtuals>` there are two approaches. The first is to
add a constructor to the alias class that takes a base value by
rvalue-reference. If such a constructor is available, it will be used to
construct an alias instance from the value returned by the factory function.
The second option is to provide two factory functions to ``py::init()``: the
first will be invoked when no alias class is required (i.e. when the class is
being used but not inherited from in Python), and the second will be invoked
when an alias is required.
You can also specify a single factory function that always returns an alias
instance: this will result in behaviour similar to ``py::init_alias<...>()``,
as described in the :ref:`extended trampoline class documentation
<extended_aliases>`.
The following example shows the different factory approaches for a class with
an alias:
.. code-block:: cpp
class Example {
public:
// ...
virtual ~Example() = default;
};
class PyExample : public Example {
public:
using Example::Example;
PyExample(Example &&base) : Example(std::move(base)) {}
};
py::class_<Example, PyExample>(m, "Example")
// Returns an Example pointer. If a PyExample is needed, the Example
// instance will be moved via the extra constructor in PyExample, above.
.def(py::init([]() { return new Example(); }))
// Two callbacks:
.def(py::init([]() { return new Example(); } /* no alias needed */,
[]() { return new PyExample(); } /* alias needed */))
// *Always* returns an alias instance (like py::init_alias<>())
.def(py::init([]() { return new PyExample(); }))
;
Brace initialization
--------------------
``pybind11::init<>`` internally uses C++11 brace initialization to call the
constructor of the target class. This means that it can be used to bind
*implicit* constructors as well:
.. code-block:: cpp
struct Aggregate {
int a;
std::string b;
};
py::class_<Aggregate>(m, "Aggregate")
.def(py::init<int, const std::string &>());
.. note::
Note that brace initialization preferentially invokes constructor overloads
taking a ``std::initializer_list``. In the rare event that this causes an
issue, you can work around it by using ``py::init(...)`` with a lambda
function that constructs the new object as desired.
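
For instance, such a workaround might look as follows (the ``Widget`` class and
its constructors are hypothetical):

.. code-block:: cpp

    struct Widget {
        Widget(std::initializer_list<int>) { }
        Widget(int, int) { }
    };

    py::class_<Widget>(m, "Widget")
        // py::init<int, int>() would brace-initialize and thus prefer the
        // std::initializer_list overload; the lambda constructs explicitly:
        .def(py::init([](int a, int b) { return Widget(a, b); }));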
.. _classes_with_non_public_destructors:
Non-public destructors
======================
If a class has a private or protected destructor (as might e.g. be the case in
a singleton pattern), a compile error will occur when creating bindings via
pybind11. The underlying issue is that the ``std::unique_ptr`` holder type that
is responsible for managing the lifetime of instances will reference the
destructor even if no deallocations ever take place. In order to expose classes
with private or protected destructors, it is possible to override the holder
type via a holder type argument to ``class_``. Pybind11 provides a helper class
``py::nodelete`` that disables any destructor invocations. In this case, it is
crucial that instances are deallocated on the C++ side to avoid memory leaks.
.. code-block:: cpp
/* ... definition ... */
class MyClass {
private:
~MyClass() { }
};
/* ... binding code ... */
py::class_<MyClass, std::unique_ptr<MyClass, py::nodelete>>(m, "MyClass")
.def(py::init<>())
.. _implicit_conversions:
Implicit conversions
====================
Suppose that instances of two types ``A`` and ``B`` are used in a project, and
that an ``A`` can easily be converted into an instance of type ``B`` (examples of this
could be a fixed and an arbitrary precision number type).
.. code-block:: cpp
py::class_<A>(m, "A")
/// ... members ...
py::class_<B>(m, "B")
.def(py::init<A>())
/// ... members ...
m.def("func",
[](const B &) { /* .... */ }
);
To invoke the function ``func`` using a variable ``a`` containing an ``A``
instance, we'd have to write ``func(B(a))`` in Python. On the other hand, C++
will automatically apply an implicit type conversion, which makes it possible
to directly write ``func(a)``.
In this situation (i.e. where ``B`` has a constructor that converts from
``A``), the following statement enables similar implicit conversions on the
Python side:
.. code-block:: cpp
py::implicitly_convertible<A, B>();
.. note::
Implicit conversions from ``A`` to ``B`` only work when ``B`` is a custom
data type that is exposed to Python via pybind11.
To prevent runaway recursion, implicit conversions are non-reentrant: an
implicit conversion invoked as part of another implicit conversion of the
same type (i.e. from ``A`` to ``B``) will fail.
.. _static_properties:
Static properties
=================
The section on :ref:`properties` discussed the creation of instance properties
that are implemented in terms of C++ getters and setters.
Static properties can also be created in a similar way to expose getters and
setters of static class attributes. Note that the implicit ``self`` argument
also exists in this case and is used to pass the Python ``type`` subclass
instance. This parameter will often not be needed by the C++ side, and the
following example illustrates how to instantiate a lambda getter function
that ignores it:
.. code-block:: cpp
py::class_<Foo>(m, "Foo")
.def_property_readonly_static("foo", [](py::object /* self */) { return Foo(); });
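
A writable static property can be bound analogously with ``def_property_static``;
the following sketch assumes a hypothetical static data member ``Foo::answer``:

.. code-block:: cpp

    py::class_<Foo>(m, "Foo")
        .def_property_static("answer",
            [](py::object /* self */) { return Foo::answer; },               // getter
            [](py::object /* self */, int value) { Foo::answer = value; });  // setter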
Operator overloading
====================
Suppose that we're given the following ``Vector2`` class with a vector addition
and scalar multiplication operation, all implemented using overloaded operators
in C++.
.. code-block:: cpp
class Vector2 {
public:
Vector2(float x, float y) : x(x), y(y) { }
Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); }
Vector2 operator*(float value) const { return Vector2(x * value, y * value); }
Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; }
Vector2& operator*=(float v) { x *= v; y *= v; return *this; }
friend Vector2 operator*(float f, const Vector2 &v) {
return Vector2(f * v.x, f * v.y);
}
std::string toString() const {
return "[" + std::to_string(x) + ", " + std::to_string(y) + "]";
}
private:
float x, y;
};
The following snippet shows how the above operators can be conveniently exposed
to Python.
.. code-block:: cpp
#include <pybind11/operators.h>
PYBIND11_MODULE(example, m) {
py::class_<Vector2>(m, "Vector2")
.def(py::init<float, float>())
.def(py::self + py::self)
.def(py::self += py::self)
.def(py::self *= float())
.def(float() * py::self)
.def(py::self * float())
.def(-py::self)
.def("__repr__", &Vector2::toString);
}
Note that a line like
.. code-block:: cpp
.def(py::self * float())
is really just shorthand notation for
.. code-block:: cpp
.def("__mul__", [](const Vector2 &a, float b) {
return a * b;
}, py::is_operator())
This can be useful for exposing additional operators that don't exist on the
C++ side, or to perform other types of customization. The ``py::is_operator``
flag marker is needed to inform pybind11 that this is an operator, which
returns ``NotImplemented`` when invoked with incompatible arguments rather than
throwing a type error.
.. note::
To use the more convenient ``py::self`` notation, the additional
header file :file:`pybind11/operators.h` must be included.
.. seealso::
The file :file:`tests/test_operator_overloading.cpp` contains a
complete example that demonstrates how to work with overloaded operators in
more detail.
.. _pickling:
Pickling support
================
Python's ``pickle`` module provides a powerful facility to serialize and
de-serialize a Python object graph into a binary data stream. To pickle and
unpickle C++ classes using pybind11, a ``py::pickle()`` definition must be
provided. Suppose the class in question has the following signature:
.. code-block:: cpp
class Pickleable {
public:
Pickleable(const std::string &value) : m_value(value) { }
const std::string &value() const { return m_value; }
void setExtra(int extra) { m_extra = extra; }
int extra() const { return m_extra; }
private:
std::string m_value;
int m_extra = 0;
};
Pickling support in Python is enabled by defining the ``__setstate__`` and
``__getstate__`` methods [#f3]_. For pybind11 classes, use ``py::pickle()``
to bind these two functions:
.. code-block:: cpp
py::class_<Pickleable>(m, "Pickleable")
.def(py::init<std::string>())
.def("value", &Pickleable::value)
.def("extra", &Pickleable::extra)
.def("setExtra", &Pickleable::setExtra)
.def(py::pickle(
[](const Pickleable &p) { // __getstate__
/* Return a tuple that fully encodes the state of the object */
return py::make_tuple(p.value(), p.extra());
},
[](py::tuple t) { // __setstate__
if (t.size() != 2)
throw std::runtime_error("Invalid state!");
/* Create a new C++ instance */
Pickleable p(t[0].cast<std::string>());
/* Assign any additional state */
p.setExtra(t[1].cast<int>());
return p;
}
));
The ``__setstate__`` part of the ``py::pickle()`` definition follows the same
rules as the single-argument version of ``py::init()``. The return type can be
a value, pointer or holder type. See :ref:`custom_constructors` for details.
An instance can now be pickled as follows:
.. code-block:: python
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
p = Pickleable("test_value")
p.setExtra(15)
data = pickle.dumps(p, 2)
Note that only the cPickle module is supported on Python 2.7. The second
argument to ``dumps`` is also crucial: it selects the pickle protocol version
2, since the older version 1 is not supported. Newer versions are also fine—for
instance, specify ``-1`` to always use the latest available version. Beware:
failure to follow these instructions will cause important pybind11 memory
allocation routines to be skipped during unpickling, which will likely lead to
memory corruption and/or segmentation faults.
.. seealso::
The file :file:`tests/test_pickling.cpp` contains a complete example
that demonstrates how to pickle and unpickle types using pybind11 in more
detail.
.. [#f3] http://docs.python.org/3/library/pickle.html#pickling-class-instances
Multiple Inheritance
====================
pybind11 can create bindings for types that derive from multiple base types
(aka. *multiple inheritance*). To do so, specify all bases in the template
arguments of the ``class_`` declaration:
.. code-block:: cpp
py::class_<MyType, BaseType1, BaseType2, BaseType3>(m, "MyType")
...
The base types can be specified in arbitrary order, and they can even be
interspersed with alias types and holder types (discussed earlier in this
document)---pybind11 will automatically find out which is which. The only
requirement is that the first template argument is the type to be declared.
It is also permitted to inherit multiply from exported C++ classes in Python,
as well as inheriting from multiple Python and/or pybind11-exported classes.
There is one caveat regarding the implementation of this feature:
When only one base type is specified for a C++ type that actually has multiple
bases, pybind11 will assume that it does not participate in multiple
inheritance, which can lead to undefined behavior. In such cases, add the tag
``multiple_inheritance`` to the class constructor:
.. code-block:: cpp
py::class_<MyType, BaseType2>(m, "MyType", py::multiple_inheritance());
The tag is redundant and does not need to be specified when multiple base types
are listed.
.. _module_local:
Module-local class bindings
===========================
When creating a binding for a class, pybind11 by default makes that binding
"global" across modules. What this means is that a type defined in one module
can be returned from any module resulting in the same Python type. For
example, this allows the following:
.. code-block:: cpp
// In the module1.cpp binding code for module1:
py::class_<Pet>(m, "Pet")
.def(py::init<std::string>())
.def_readonly("name", &Pet::name);
.. code-block:: cpp
// In the module2.cpp binding code for module2:
m.def("create_pet", [](std::string name) { return new Pet(name); });
.. code-block:: pycon
>>> from module1 import Pet
>>> from module2 import create_pet
>>> pet1 = Pet("Kitty")
>>> pet2 = create_pet("Doggy")
>>> pet2.name()
'Doggy'
When writing binding code for a library, this is usually desirable: this
allows, for example, splitting up a complex library into multiple Python
modules.
In some cases, however, this can cause conflicts. For example, suppose two
unrelated modules make use of an external C++ library and each provides custom
bindings for one of that library's classes. This will result in an error when
a Python program attempts to import both modules (directly or indirectly)
because of conflicting definitions of the external type:
.. code-block:: cpp
// dogs.cpp
// Binding for external library class:
py::class_<pets::Pet>(m, "Pet")
.def("name", &pets::Pet::name);
// Binding for local extension class:
py::class_<Dog, pets::Pet>(m, "Dog")
.def(py::init<std::string>());
.. code-block:: cpp
// cats.cpp, in a completely separate project from the above dogs.cpp.
// Binding for external library class:
py::class_<pets::Pet>(m, "Pet")
.def("get_name", &pets::Pet::name);
// Binding for local extending class:
py::class_<Cat, pets::Pet>(m, "Cat")
.def(py::init<std::string>());
.. code-block:: pycon
>>> import cats
>>> import dogs
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: generic_type: type "Pet" is already registered!
To get around this, you can tell pybind11 to keep the external class binding
localized to the module by passing the ``py::module_local()`` attribute into
the ``py::class_`` constructor:
.. code-block:: cpp
// Pet binding in dogs.cpp:
py::class_<pets::Pet>(m, "Pet", py::module_local())
.def("name", &pets::Pet::name);
.. code-block:: cpp
// Pet binding in cats.cpp:
py::class_<pets::Pet>(m, "Pet", py::module_local())
.def("get_name", &pets::Pet::name);
This makes the Python-side ``dogs.Pet`` and ``cats.Pet`` into distinct classes,
avoiding the conflict and allowing both modules to be loaded. C++ code in the
``dogs`` module that casts or returns a ``Pet`` instance will result in a
``dogs.Pet`` Python instance, while C++ code in the ``cats`` module will result
in a ``cats.Pet`` Python instance.
This does come with two caveats, however: First, external modules cannot return
or cast a ``Pet`` instance to Python (unless they also provide their own local
bindings). Second, from the Python point of view they are two distinct classes.
Note that the locality only applies in the C++ -> Python direction. When
passing such a ``py::module_local`` type into a C++ function, the module-local
classes are still considered. This means that if the following function is
added to any module (including but not limited to the ``cats`` and ``dogs``
modules above) it will be callable with either a ``dogs.Pet`` or ``cats.Pet``
argument:
.. code-block:: cpp
m.def("pet_name", [](const pets::Pet &pet) { return pet.name(); });
For example, suppose the above function is added to each of ``cats.cpp``,
``dogs.cpp`` and ``frogs.cpp`` (where ``frogs.cpp`` is some other module that
does *not* bind ``Pets`` at all).
.. code-block:: pycon
>>> import cats, dogs, frogs # No error because of the added py::module_local()
>>> mycat, mydog = cats.Cat("Fluffy"), dogs.Dog("Rover")
>>> (cats.pet_name(mycat), dogs.pet_name(mydog))
('Fluffy', 'Rover')
>>> (cats.pet_name(mydog), dogs.pet_name(mycat), frogs.pet_name(mycat))
('Rover', 'Fluffy', 'Fluffy')
It is possible to use ``py::module_local()`` registrations in one module even
if another module registers the same type globally: within the module with the
module-local definition, all C++ instances will be cast to the associated bound
Python type. In other modules any such values are converted to the global
Python type created elsewhere.
.. note::
STL bindings (as provided via the optional :file:`pybind11/stl_bind.h`
header) apply ``py::module_local`` by default when the bound type might
conflict with other modules; see :ref:`stl_bind` for details.
.. note::
The localization of the bound types is actually tied to the shared object
or binary generated by the compiler/linker. For typical modules created
with ``PYBIND11_MODULE()``, this distinction is not significant. It is
possible, however, when :ref:`embedding` to embed multiple modules in the
same binary (see :ref:`embedding_modules`). In such a case, the
localization will apply across all embedded modules within the same binary.
.. seealso::
The file :file:`tests/test_local_bindings.cpp` contains additional examples
that demonstrate how ``py::module_local()`` works.
Binding protected member functions
==================================
It's normally not possible to expose ``protected`` member functions to Python:
.. code-block:: cpp
class A {
protected:
int foo() const { return 42; }
};
py::class_<A>(m, "A")
.def("foo", &A::foo); // error: 'foo' is a protected member of 'A'
On one hand, this is good because non-``public`` members aren't meant to be
accessed from the outside. But we may want to make use of ``protected``
functions in derived Python classes.
The following pattern makes this possible:
.. code-block:: cpp
class A {
protected:
int foo() const { return 42; }
};
class Publicist : public A { // helper type for exposing protected functions
public:
using A::foo; // inherited with different access modifier
};
py::class_<A>(m, "A") // bind the primary class
.def("foo", &Publicist::foo); // expose protected methods via the publicist
This works because ``&Publicist::foo`` is exactly the same function as
``&A::foo`` (same signature and address), just with a different access
modifier. The only purpose of the ``Publicist`` helper class is to make
the function name ``public``.
If the intent is to expose ``protected`` ``virtual`` functions which can be
overridden in Python, the publicist pattern can be combined with the previously
described trampoline:
.. code-block:: cpp
class A {
public:
virtual ~A() = default;
protected:
virtual int foo() const { return 42; }
};
class Trampoline : public A {
public:
int foo() const override { PYBIND11_OVERLOAD(int, A, foo, ); }
};
class Publicist : public A {
public:
using A::foo;
};
py::class_<A, Trampoline>(m, "A") // <-- `Trampoline` here
.def("foo", &Publicist::foo); // <-- `Publicist` here, not `Trampoline`!
.. note::
MSVC 2015 has a compiler bug (fixed in version 2017) which
requires a more explicit function binding in the form of
``.def("foo", static_cast<int (A::*)() const>(&Publicist::foo));``
where ``int (A::*)() const`` is the type of ``A::foo``.
Custom automatic downcasters
============================
As explained in :ref:`inheritance`, pybind11 comes with built-in
understanding of the dynamic type of polymorphic objects in C++; that
is, returning a Pet to Python produces a Python object that knows it's
wrapping a Dog, if Pet has virtual methods and pybind11 knows about
Dog and this Pet is in fact a Dog. Sometimes, you might want to
provide this automatic downcasting behavior when creating bindings for
a class hierarchy that does not use standard C++ polymorphism, such as
LLVM [#f4]_. As long as there's some way to determine at runtime
whether a downcast is safe, you can proceed by specializing the
``pybind11::polymorphic_type_hook`` template:
.. code-block:: cpp
enum class PetKind { Cat, Dog, Zebra };
struct Pet { // Not polymorphic: has no virtual methods
const PetKind kind;
int age = 0;
protected:
Pet(PetKind _kind) : kind(_kind) {}
};
struct Dog : Pet {
Dog() : Pet(PetKind::Dog) {}
std::string sound = "woof!";
std::string bark() const { return sound; }
};
namespace pybind11 {
template<> struct polymorphic_type_hook<Pet> {
static const void *get(const Pet *src, const std::type_info*& type) {
// note that src may be nullptr
if (src && src->kind == PetKind::Dog) {
type = &typeid(Dog);
return static_cast<const Dog*>(src);
}
return src;
}
};
} // namespace pybind11
When pybind11 wants to convert a C++ pointer of type ``Base*`` to a
Python object, it calls ``polymorphic_type_hook<Base>::get()`` to
determine if a downcast is possible. The ``get()`` function should use
whatever runtime information is available to determine if its ``src``
parameter is in fact an instance of some class ``Derived`` that
inherits from ``Base``. If it finds such a ``Derived``, it sets ``type
= &typeid(Derived)`` and returns a pointer to the ``Derived`` object
that contains ``src``. Otherwise, it just returns ``src``, leaving
``type`` at its default value of nullptr. If you set ``type`` to a
type that pybind11 doesn't know about, no downcasting will occur, and
the original ``src`` pointer will be used with its static type
``Base*``.
It is critical that the returned pointer and ``type`` argument of
``get()`` agree with each other: if ``type`` is set to something
non-null, the returned pointer must point to the start of an object
whose type is ``type``. If the hierarchy being exposed uses only
single inheritance, a simple ``return src;`` will achieve this just
fine, but in the general case, you must cast ``src`` to the
appropriate derived-class pointer (e.g. using
``static_cast<const Derived *>(src)``) before allowing it to be returned as a
``void*``.
.. [#f4] https://llvm.org/docs/HowToSetUpLLVMStyleRTTI.html
.. note::
pybind11's standard support for downcasting objects whose types
have virtual methods is implemented using
``polymorphic_type_hook`` too, using the standard C++ ability to
determine the most-derived type of a polymorphic object using
``typeid()`` and to cast a base pointer to that most-derived type
(even if you don't know what it is) using ``dynamic_cast<void*>``.
.. seealso::
The file :file:`tests/test_tagbased_polymorphic.cpp` contains a
more complete example, including a demonstration of how to provide
automatic downcasting for an entire class hierarchy without
writing one get() function for each class.
.. _embedding:
Embedding the interpreter
#########################
While pybind11 is mainly focused on extending Python using C++, it's also
possible to do the reverse: embed the Python interpreter into a C++ program.
All of the other documentation pages still apply here, so refer to them for
general pybind11 usage. This section will cover a few extra things required
for embedding.
Getting started
===============
A basic executable with an embedded interpreter can be created with just a few
lines of CMake and the ``pybind11::embed`` target, as shown below. For more
information, see :doc:`/compiling`.
.. code-block:: cmake
cmake_minimum_required(VERSION 3.0)
project(example)
find_package(pybind11 REQUIRED) # or `add_subdirectory(pybind11)`
add_executable(example main.cpp)
target_link_libraries(example PRIVATE pybind11::embed)
The essential structure of the ``main.cpp`` file looks like this:
.. code-block:: cpp
#include <pybind11/embed.h> // everything needed for embedding
namespace py = pybind11;
int main() {
py::scoped_interpreter guard{}; // start the interpreter and keep it alive
py::print("Hello, World!"); // use the Python API
}
The interpreter must be initialized before using any Python API, which includes
all the functions and classes in pybind11. The RAII guard class `scoped_interpreter`
takes care of the interpreter lifetime. After the guard is destroyed, the interpreter
shuts down and clears its memory. No Python functions can be called after this.
Executing Python code
=====================
There are a few different ways to run Python code. One option is to use `eval`,
`exec` or `eval_file`, as explained in :ref:`eval`. Here is a quick example in
the context of an executable with an embedded interpreter:
.. code-block:: cpp
#include <pybind11/embed.h>
namespace py = pybind11;
int main() {
py::scoped_interpreter guard{};
py::exec(R"(
kwargs = dict(name="World", number=42)
message = "Hello, {name}! The answer is {number}".format(**kwargs)
print(message)
)");
}
Alternatively, similar results can be achieved using pybind11's API (see
:doc:`/advanced/pycpp/index` for more details).
.. code-block:: cpp
#include <pybind11/embed.h>
namespace py = pybind11;
using namespace py::literals;
int main() {
py::scoped_interpreter guard{};
auto kwargs = py::dict("name"_a="World", "number"_a=42);
auto message = "Hello, {name}! The answer is {number}"_s.format(**kwargs);
py::print(message);
}
The two approaches can also be combined:
.. code-block:: cpp
#include <pybind11/embed.h>
#include <iostream>
namespace py = pybind11;
using namespace py::literals;
int main() {
py::scoped_interpreter guard{};
auto locals = py::dict("name"_a="World", "number"_a=42);
py::exec(R"(
message = "Hello, {name}! The answer is {number}".format(**locals())
)", py::globals(), locals);
auto message = locals["message"].cast<std::string>();
std::cout << message;
}
Importing modules
=================
Python modules can be imported using `module::import()`:
.. code-block:: cpp
py::module sys = py::module::import("sys");
py::print(sys.attr("path"));
For convenience, the current working directory is included in ``sys.path`` when
embedding the interpreter. This makes it easy to import local Python files:
.. code-block:: python
"""calc.py located in the working directory"""
def add(i, j):
return i + j
.. code-block:: cpp
py::module calc = py::module::import("calc");
py::object result = calc.attr("add")(1, 2);
int n = result.cast<int>();
assert(n == 3);
Modules can be reloaded using `module::reload()` if the source is modified e.g.
by an external process. This can be useful in scenarios where the application
imports a user defined data processing script which needs to be updated after
changes by the user. Note that this function does not reload modules recursively.
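
Continuing the ``calc.py`` example above, a reload might look like this (a
minimal sketch):

.. code-block:: cpp

    py::module calc = py::module::import("calc");
    int n1 = calc.attr("add")(1, 2).cast<int>();

    // ... the user edits calc.py on disk ...

    calc.reload();  // re-executes the module source; does not reload submodules
    int n2 = calc.attr("add")(1, 2).cast<int>();  // uses the updated definition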
.. _embedding_modules:
Adding embedded modules
=======================
Embedded binary modules can be added using the `PYBIND11_EMBEDDED_MODULE` macro.
Note that the definition must be placed at global scope. They can be imported
like any other module.
.. code-block:: cpp
#include <pybind11/embed.h>
namespace py = pybind11;
PYBIND11_EMBEDDED_MODULE(fast_calc, m) {
// `m` is a `py::module` which is used to bind functions and classes
m.def("add", [](int i, int j) {
return i + j;
});
}
int main() {
py::scoped_interpreter guard{};
auto fast_calc = py::module::import("fast_calc");
auto result = fast_calc.attr("add")(1, 2).cast<int>();
assert(result == 3);
}
Unlike extension modules where only a single binary module can be created, on
the embedded side an unlimited number of modules can be added using multiple
`PYBIND11_EMBEDDED_MODULE` definitions (as long as they have unique names).
These modules are added to Python's list of builtins, so they can also be
imported in pure Python files loaded by the interpreter. Everything interacts
naturally:
.. code-block:: python
"""py_module.py located in the working directory"""
import cpp_module
a = cpp_module.a
b = a + 1
.. code-block:: cpp
#include <pybind11/embed.h>
namespace py = pybind11;
PYBIND11_EMBEDDED_MODULE(cpp_module, m) {
m.attr("a") = 1;
}
int main() {
py::scoped_interpreter guard{};
auto py_module = py::module::import("py_module");
auto locals = py::dict("fmt"_a="{} + {} = {}", **py_module.attr("__dict__"));
assert(locals["a"].cast<int>() == 1);
assert(locals["b"].cast<int>() == 2);
py::exec(R"(
c = a + b
message = fmt.format(a, b, c)
)", py::globals(), locals);
assert(locals["c"].cast<int>() == 3);
assert(locals["message"].cast<std::string>() == "1 + 2 = 3");
}
Interpreter lifetime
====================
The Python interpreter shuts down when `scoped_interpreter` is destroyed. After
this, creating a new instance will restart the interpreter. Alternatively, the
`initialize_interpreter` / `finalize_interpreter` pair of functions can be used
to directly set the state at any time.
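
A minimal sketch of the function-based approach, equivalent in effect to using
two consecutive ``scoped_interpreter`` guards:

.. code-block:: cpp

    #include <pybind11/embed.h>
    namespace py = pybind11;

    int main() {
        py::initialize_interpreter();
        py::print("first run");
        py::finalize_interpreter();

        // The interpreter may be started again after it has been shut down:
        py::initialize_interpreter();
        py::print("second run");
        py::finalize_interpreter();
    }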
Modules created with pybind11 can be safely re-initialized after the interpreter
has been restarted. However, this may not apply to third-party extension modules.
The issue is that Python itself cannot completely unload extension modules and
there are several caveats with regard to interpreter restarting. In short, not
all memory may be freed, either due to Python reference cycles or user-created
global data. All the details can be found in the CPython documentation.
.. warning::
Creating two concurrent `scoped_interpreter` guards is a fatal error. So is
calling `initialize_interpreter` for a second time after the interpreter
has already been initialized.
Do not use the raw CPython API functions ``Py_Initialize`` and
``Py_Finalize`` as these do not properly handle the lifetime of
pybind11's internal data.
Sub-interpreter support
=======================
Creating multiple copies of `scoped_interpreter` is not possible because it
represents the main Python interpreter. Sub-interpreters are something different
and they do permit the existence of multiple interpreters. This is an advanced
feature of the CPython API and should be handled with care. pybind11 does not
currently offer a C++ interface for sub-interpreters, so refer to the CPython
documentation for all the details regarding this feature.
We'll just mention a couple of caveats regarding the sub-interpreter support in pybind11:
1. Sub-interpreters will not receive independent copies of embedded modules.
Instead, these are shared and modifications in one interpreter may be
reflected in another.
2. Managing multiple threads, multiple interpreters and the GIL can be
challenging and there are several caveats here, even within the pure
CPython API (please refer to the Python docs for details). As for
pybind11, keep in mind that `gil_scoped_release` and `gil_scoped_acquire`
do not take sub-interpreters into account.
Miscellaneous
#############
.. _macro_notes:
General notes regarding convenience macros
==========================================
pybind11 provides a few convenience macros such as
:func:`PYBIND11_DECLARE_HOLDER_TYPE` and ``PYBIND11_OVERLOAD_*``. Since these
are "just" macros that are evaluated in the preprocessor (which has no concept
of types), they *will* get confused by commas in a template argument; for
example, consider:
.. code-block:: cpp
PYBIND11_OVERLOAD(MyReturnType<T1, T2>, Class<T3, T4>, func)
The C preprocessor interprets this as five arguments (with new
arguments beginning after each comma) rather than three. To get around this,
there are two alternatives: you can use a type alias, or you can wrap the type
using the ``PYBIND11_TYPE`` macro:
.. code-block:: cpp
// Version 1: using a type alias
using ReturnType = MyReturnType<T1, T2>;
using ClassType = Class<T3, T4>;
PYBIND11_OVERLOAD(ReturnType, ClassType, func);
// Version 2: using the PYBIND11_TYPE macro:
PYBIND11_OVERLOAD(PYBIND11_TYPE(MyReturnType<T1, T2>),
PYBIND11_TYPE(Class<T3, T4>), func)
The ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds.
.. _gil:
Global Interpreter Lock (GIL)
=============================
When calling a C++ function from Python, the GIL is always held.
The classes :class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be
used to acquire and release the global interpreter lock in the body of a C++
function call. In this way, long-running C++ code can be parallelized using
multiple Python threads. Taking :ref:`overriding_virtuals` as an example, this
could be realized as follows (important changes highlighted):
.. code-block:: cpp
:emphasize-lines: 8,9,31,32
class PyAnimal : public Animal {
public:
/* Inherit the constructors */
using Animal::Animal;
/* Trampoline (need one for each virtual function) */
std::string go(int n_times) {
/* Acquire GIL before calling Python code */
py::gil_scoped_acquire acquire;
PYBIND11_OVERLOAD_PURE(
std::string, /* Return type */
Animal, /* Parent class */
go, /* Name of function */
n_times /* Argument(s) */
);
}
};
PYBIND11_MODULE(example, m) {
py::class_<Animal, PyAnimal> animal(m, "Animal");
animal
.def(py::init<>())
.def("go", &Animal::go);
py::class_<Dog>(m, "Dog", animal)
.def(py::init<>());
m.def("call_go", [](Animal *animal) -> std::string {
/* Release GIL before calling into (potentially long-running) C++ code */
py::gil_scoped_release release;
return call_go(animal);
});
}
The ``call_go`` wrapper can also be simplified using the `call_guard` policy
(see :ref:`call_policies`) which yields the same result:
.. code-block:: cpp
m.def("call_go", &call_go, py::call_guard<py::gil_scoped_release>());
Binding sequence data types, iterators, the slicing protocol, etc.
==================================================================
Please refer to the supplemental example for details.
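
As a minimal illustration (assuming a hypothetical ``Sequence`` class providing
``size()`` and ``operator[]``), length queries and bounds-checked item access
could be bound as follows:

.. code-block:: cpp

    py::class_<Sequence>(m, "Sequence")
        .def("__len__", [](const Sequence &s) { return s.size(); })
        .def("__getitem__", [](const Sequence &s, size_t i) {
            if (i >= s.size())
                throw py::index_error();  // translated to a Python IndexError
            return s[i];
        });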
.. seealso::
The file :file:`tests/test_sequences_and_iterators.cpp` contains a
complete example that shows how to bind a sequence data type, including
length queries (``__len__``), iterators (``__iter__``), the slicing
protocol and other kinds of useful operations.
Partitioning code over multiple extension modules
=================================================
It's straightforward to split binding code over multiple extension modules,
while referencing types that are declared elsewhere. Everything "just" works
without any special precautions. One exception to this rule occurs when
extending a type declared in another extension module. Recall the basic example
from Section :ref:`inheritance`.
.. code-block:: cpp
py::class_<Pet> pet(m, "Pet");
pet.def(py::init<const std::string &>())
.def_readwrite("name", &Pet::name);
py::class_<Dog>(m, "Dog", pet /* <- specify parent */)
.def(py::init<const std::string &>())
.def("bark", &Dog::bark);
Suppose now that ``Pet`` bindings are defined in a module named ``basic``,
whereas the ``Dog`` bindings are defined somewhere else. The challenge is of
course that the variable ``pet`` is not available anymore though it is needed
to indicate the inheritance relationship to the constructor of ``class_<Dog>``.
However, it can be acquired as follows:
.. code-block:: cpp
py::object pet = (py::object) py::module::import("basic").attr("Pet");
py::class_<Dog>(m, "Dog", pet)
.def(py::init<const std::string &>())
.def("bark", &Dog::bark);
Alternatively, you can specify the base class as a template parameter option to
``class_``, which performs an automated lookup of the corresponding Python
type. Like the above code, however, this also requires invoking the ``import``
function once to ensure that the pybind11 binding code of the module ``basic``
has been executed:
.. code-block:: cpp
py::module::import("basic");
py::class_<Dog, Pet>(m, "Dog")
.def(py::init<const std::string &>())
.def("bark", &Dog::bark);
Naturally, both methods will fail when there are cyclic dependencies.
Note that pybind11 code compiled with hidden-by-default symbol visibility (e.g.
via the command line flag ``-fvisibility=hidden`` on GCC/Clang), which is
required for proper pybind11 functionality, can interfere with the ability to
access types defined in another extension module. Working around this requires
manually exporting types that are accessed by multiple extension modules;
pybind11 provides a macro to do just this:
.. code-block:: cpp
class PYBIND11_EXPORT Dog : public Animal {
...
};
Note also that it is possible (although it would rarely be required) to share arbitrary
C++ objects between extension modules at runtime. Internal library data is shared
between modules using capsule machinery [#f6]_ which can be also utilized for
storing, modifying and accessing user-defined data. Note that an extension module
will "see" other extensions' data if and only if they were built with the same
pybind11 version. Consider the following example:
.. code-block:: cpp
auto data = (MyData *) py::get_shared_data("mydata");
if (!data)
data = (MyData *) py::set_shared_data("mydata", new MyData(42));
If the above snippet was used in several separately compiled extension modules,
the first one to be imported would create a ``MyData`` instance and associate
a ``"mydata"`` key with a pointer to it. Extensions that are imported later
would be then able to access the data behind the same pointer.
.. [#f6] https://docs.python.org/3/extending/extending.html#using-capsules
Module Destructors
==================
pybind11 does not provide an explicit mechanism to invoke cleanup code at
module destruction time. In rare cases where such functionality is required, it
is possible to emulate it using Python capsules or weak references with a
destruction callback.
.. code-block:: cpp
auto cleanup_callback = []() {
// perform cleanup here -- this function is called with the GIL held
};
m.add_object("_cleanup", py::capsule(cleanup_callback));
This approach has the potential downside that instances of classes exposed
within the module may still be alive when the cleanup callback is invoked
(whether this is acceptable will generally depend on the application).
Alternatively, the capsule may also be stashed within a type object, which
ensures that it is not called before all instances of that type have been
collected:
.. code-block:: cpp
auto cleanup_callback = []() { /* ... */ };
m.attr("BaseClass").attr("_cleanup") = py::capsule(cleanup_callback);
Both approaches also expose a potentially dangerous ``_cleanup`` attribute in
Python, which may be undesirable from an API standpoint (a premature explicit
call from Python might lead to undefined behavior). Yet another approach that
avoids this issue involves weak reference with a cleanup callback:
.. code-block:: cpp
// Register a callback function that is invoked when the BaseClass object is collected
py::cpp_function cleanup_callback(
[](py::handle weakref) {
// perform cleanup here -- this function is called with the GIL held
weakref.dec_ref(); // release weak reference
}
);
// Create a weak reference with a cleanup callback and initially leak it
(void) py::weakref(m.attr("BaseClass"), cleanup_callback).release();
.. note::
PyPy (at least version 5.9) does not garbage collect objects when the
interpreter exits. An alternative approach (which also works on CPython) is to use
the :py:mod:`atexit` module [#f7]_, for example:
.. code-block:: cpp
auto atexit = py::module::import("atexit");
atexit.attr("register")(py::cpp_function([]() {
// perform cleanup here -- this function is called with the GIL held
}));
.. [#f7] https://docs.python.org/3/library/atexit.html
Generating documentation using Sphinx
=====================================
Sphinx [#f4]_ has the ability to inspect the signatures and documentation
strings in pybind11-based extension modules to automatically generate beautiful
documentation in a variety of formats. The python_example repository [#f5]_
contains a simple example which uses this approach.
There are two potential gotchas when using this approach: first, make sure that
the resulting strings do not contain any :kbd:`TAB` characters, which break the
docstring parsing routines. You may want to use C++11 raw string literals,
which are convenient for multi-line comments. Conveniently, any excess
indentation will automatically be removed by Sphinx. However, for this to
work, it is important that all lines are indented consistently, i.e.:
.. code-block:: cpp
// ok
m.def("foo", &foo, R"mydelimiter(
The foo function
Parameters
----------
)mydelimiter");
// *not ok*
m.def("foo", &foo, R"mydelimiter(The foo function
Parameters
----------
)mydelimiter");
By default, pybind11 automatically generates and prepends a signature to the docstring of a function
registered with ``module::def()`` and ``class_::def()``. Sometimes this
behavior is not desirable, because you want to provide your own signature or remove
the docstring completely to exclude the function from the Sphinx documentation.
The class ``options`` allows you to selectively suppress auto-generated signatures:
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
py::options options;
options.disable_function_signatures();
m.def("add", [](int a, int b) { return a + b; }, "A function which adds two numbers");
}
Note that changes to the settings affect only function bindings created during the
lifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function,
the default settings are restored to prevent unwanted side effects.
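For example, as a minimal sketch (the ``subtract`` binding is an illustrative addition), limiting the suppression to a nested scope restores the default behavior for bindings created afterwards:
.. code-block:: cpp
    PYBIND11_MODULE(example, m) {
        {
            py::options options;
            options.disable_function_signatures();
            // no auto-generated signature for this binding
            m.def("add", [](int a, int b) { return a + b; }, "A function which adds two numbers");
        } // `options` goes out of scope here and the default settings are restored
        // this binding receives an auto-generated signature again
        m.def("subtract", [](int a, int b) { return a - b; });
    }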
.. [#f4] http://www.sphinx-doc.org
.. [#f5] http://github.com/pybind/python_example
Utilities
#########
Using Python's print function in C++
====================================
The usual way to write output in C++ is using ``std::cout`` while in Python one
would use ``print``. Since these methods use different buffers, mixing them can
lead to output order issues. To resolve this, pybind11 modules can use the
:func:`py::print` function which writes to Python's ``sys.stdout`` for consistency.
Python's ``print`` function is replicated in the C++ API including optional
keyword arguments ``sep``, ``end``, ``file``, ``flush``. Everything works as
expected in Python:
.. code-block:: cpp
py::print(1, 2.0, "three"); // 1 2.0 three
py::print(1, 2.0, "three", "sep"_a="-"); // 1-2.0-three
auto args = py::make_tuple("unpacked", true);
py::print("->", *args, "end"_a="<-"); // -> unpacked True <-
.. _ostream_redirect:
Capturing standard output from ostream
======================================
Often, a library will use the streams ``std::cout`` and ``std::cerr`` to print,
but this does not play well with Python's standard ``sys.stdout`` and ``sys.stderr``
redirection. Replacing a library's printing with `py::print <print>` may not
be feasible. This can be fixed using a guard around the library function that
redirects output to the corresponding Python streams:
.. code-block:: cpp
#include <pybind11/iostream.h>
...
// Add a scoped redirect for your noisy code
m.def("noisy_func", []() {
py::scoped_ostream_redirect stream(
std::cout, // std::ostream&
py::module::import("sys").attr("stdout") // Python output
);
call_noisy_func();
});
This method respects flushes on the output streams and will flush if needed
when the scoped guard is destroyed. This allows the output to be redirected in
real time, such as to a Jupyter notebook. The two arguments, the C++ stream and
the Python output, are optional, and default to standard output if not given. An
extra type, `py::scoped_estream_redirect <scoped_estream_redirect>`, is identical
except for defaulting to ``std::cerr`` and ``sys.stderr``; this can be useful with
`py::call_guard`, which allows multiple items, but uses the default constructor:
.. code-block:: cpp
// Alternative: Call single function using call guard
m.def("noisy_func", &call_noisy_function,
py::call_guard<py::scoped_ostream_redirect,
py::scoped_estream_redirect>());
The redirection can also be done in Python with the addition of a context
manager, using the `py::add_ostream_redirect() <add_ostream_redirect>` function:
.. code-block:: cpp
py::add_ostream_redirect(m, "ostream_redirect");
The name in Python defaults to ``ostream_redirect`` if no name is passed. This
creates the following context manager in Python:
.. code-block:: python
with ostream_redirect(stdout=True, stderr=True):
noisy_function()
It defaults to redirecting both streams, though you can use the keyword
arguments to disable one of the streams if needed.
.. note::
The above methods will not redirect C-level output to file descriptors, such
as ``fprintf``. For those cases, you'll need to redirect the file
descriptors either directly in C or with Python's ``os.dup2`` function
in an operating-system dependent way.
.. _eval:
Evaluating Python expressions from strings and files
====================================================
pybind11 provides the `eval`, `exec` and `eval_file` functions to evaluate
Python expressions and statements. The following example illustrates how they
can be used.
.. code-block:: cpp
// At beginning of file
#include <pybind11/eval.h>
...
// Evaluate in scope of main module
py::object scope = py::module::import("__main__").attr("__dict__");
// Evaluate an isolated expression
int result = py::eval("my_variable + 10", scope).cast<int>();
// Evaluate a sequence of statements
py::exec(
"print('Hello')\n"
"print('world!');",
scope);
    // Evaluate the statements in a separate Python file on disk
py::eval_file("script.py", scope);
C++11 raw string literals are also supported and quite handy for this purpose.
The only requirement is that the first statement must be on a new line following
the raw string delimiter ``R"(``, ensuring all lines have a common leading indent:
.. code-block:: cpp
py::exec(R"(
x = get_answer()
if x == 42:
print('Hello World!')
else:
print('Bye!')
)", scope
);
.. note::
`eval` and `eval_file` accept a template parameter that describes how the
string/file should be interpreted. Possible choices include ``eval_expr``
(isolated expression), ``eval_single_statement`` (a single statement, return
value is always ``none``), and ``eval_statements`` (sequence of statements,
return value is always ``none``). `eval` defaults to ``eval_expr``,
`eval_file` defaults to ``eval_statements`` and `exec` is just a shortcut
for ``eval<eval_statements>``.
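    As a minimal sketch of these modes (assuming ``scope`` is set up as in the
    earlier example):
    .. code-block:: cpp
        // a single statement; the return value is always ``none``
        py::eval<py::eval_single_statement>("x = 41", scope);
        // the default mode ``eval_expr`` evaluates an isolated expression
        int x = py::eval("x + 1", scope).cast<int>();
        // ``exec`` is equivalent to ``eval<eval_statements>``
        py::exec("print(x)", scope);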
.. _numpy:
NumPy
#####
Buffer protocol
===============
Python supports an extremely general and convenient approach for exchanging
data between plugin libraries. Types can expose a buffer view [#f2]_, which
provides fast direct access to the raw internal data representation. Suppose we
want to bind the following simplistic Matrix class:
.. code-block:: cpp
class Matrix {
public:
Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) {
m_data = new float[rows*cols];
}
float *data() { return m_data; }
size_t rows() const { return m_rows; }
size_t cols() const { return m_cols; }
private:
size_t m_rows, m_cols;
float *m_data;
};
The following binding code exposes the ``Matrix`` contents as a buffer object,
making it possible to cast Matrices into NumPy arrays. It is even possible to
completely avoid copy operations with Python expressions like
``np.array(matrix_instance, copy = False)``.
.. code-block:: cpp
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(float), /* Size of one scalar */
py::format_descriptor<float>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(float) * m.cols(), /* Strides (in bytes) for each index */
sizeof(float) }
);
});
Supporting the buffer protocol in a new type involves specifying the special
``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the
``def_buffer()`` method with a lambda function that creates a
``py::buffer_info`` description record on demand describing a given matrix
instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol
specification.
.. code-block:: cpp
struct buffer_info {
void *ptr;
ssize_t itemsize;
std::string format;
ssize_t ndim;
std::vector<ssize_t> shape;
std::vector<ssize_t> strides;
};
To create a C++ function that can take a Python buffer object as an argument,
simply use the type ``py::buffer`` as one of its arguments. Buffers can exist
in a great variety of configurations, hence some safety checks are usually
necessary in the function body. Below, you can see a basic example of how to
define a custom constructor for the Eigen double precision matrix
(``Eigen::MatrixXd``) type, which supports initialization from compatible
buffer objects (e.g. a NumPy matrix).
.. code-block:: cpp
/* Bind MatrixXd (or some other Eigen type) to Python */
typedef Eigen::MatrixXd Matrix;
typedef Matrix::Scalar Scalar;
constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def("__init__", [](Matrix &m, py::buffer b) {
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
/* Some sanity checks ... */
if (info.format != py::format_descriptor<Scalar>::format())
throw std::runtime_error("Incompatible format: expected a double array!");
if (info.ndim != 2)
throw std::runtime_error("Incompatible buffer dimension!");
auto strides = Strides(
info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar),
info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar));
auto map = Eigen::Map<Matrix, 0, Strides>(
static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);
new (&m) Matrix(map);
});
For reference, the ``def_buffer()`` call for this Eigen data type should look
as follows:
.. code-block:: cpp
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(Scalar), /* Size of one scalar */
py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(Scalar) * (rowMajor ? m.cols() : 1),
sizeof(Scalar) * (rowMajor ? 1 : m.rows()) }
/* Strides (in bytes) for each index */
);
})
For a much easier approach of binding Eigen types (although with some
limitations), refer to the section on :doc:`/advanced/cast/eigen`.
.. seealso::
The file :file:`tests/test_buffers.cpp` contains a complete example
that demonstrates using the buffer protocol with pybind11 in more detail.
.. [#f2] http://docs.python.org/3/c-api/buffer.html
Arrays
======
By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can
restrict the function so that it only accepts NumPy arrays (rather than any
type of Python object satisfying the buffer protocol).
In many situations, we want to define a function which only accepts a NumPy
array of a certain data type. This is possible via the ``py::array_t<T>``
template. For instance, the following function requires the argument to be a
NumPy array containing double precision values.
.. code-block:: cpp
void f(py::array_t<double> array);
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
of the requested type. Note that this feature requires the
:file:`pybind11/numpy.h` header to be included.
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
Sometimes, it can be useful to require a function to only accept dense arrays
using either the C (row-major) or Fortran (column-major) ordering. This can be
accomplished via a second template argument with values ``py::array::c_style``
or ``py::array::f_style``.
.. code-block:: cpp
void f(py::array_t<double, py::array::c_style | py::array::forcecast> array);
The ``py::array::forcecast`` argument is the default value of the second
template parameter, and it ensures that non-conforming arguments are converted
into an array satisfying the specified requirements instead of trying the next
function overload.
Structured types
================
In order for ``py::array_t`` to work with structured (record) types, we first
need to register the memory layout of the type. This can be done via
``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which
expects the type followed by field names:
.. code-block:: cpp
struct A {
int x;
double y;
};
struct B {
int z;
A a;
};
// ...
PYBIND11_MODULE(test, m) {
// ...
PYBIND11_NUMPY_DTYPE(A, x, y);
PYBIND11_NUMPY_DTYPE(B, z, a);
/* now both A and B can be used as template arguments to py::array_t */
}
The structure should consist of fundamental arithmetic types, ``std::complex``,
previously registered substructures, and arrays of any of the above. Both C++
arrays and ``std::array`` are supported. While there is a static assertion to
prevent many types of unsupported structures, it is still the user's
responsibility to use only "plain" structures that can be safely manipulated as
raw memory without violating invariants.
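Once registered, the structured type can be used like any other element type.
The following minimal sketch (the function name and the ``c_style`` flag are
illustrative choices, not requirements) sums the ``x`` fields of an array with
dtype ``A``:
.. code-block:: cpp
    m.def("sum_x", [](py::array_t<A, py::array::c_style> arr) {
        // the c_style flag requests a dense, C-ordered buffer so that plain
        // pointer arithmetic over the record elements is valid
        const A *ptr = arr.data();
        long total = 0;
        for (py::ssize_t i = 0; i < arr.size(); i++)
            total += ptr[i].x;
        return total;
    });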
Vectorizing functions
=====================
Suppose we want to bind a function with the following signature to Python so
that it can process arbitrary NumPy array arguments (vectors, matrices, general
N-D arrays) in addition to its normal arguments:
.. code-block:: cpp
double my_func(int x, float y, double z);
After including the ``pybind11/numpy.h`` header, this is extremely simple:
.. code-block:: cpp
m.def("vectorized_func", py::vectorize(my_func));
Invoking the function like below causes 4 calls to be made to ``my_func`` with
each of the array elements. The significant advantage of this compared to
solutions like ``numpy.vectorize()`` is that the loop over the elements runs
entirely on the C++ side and can be crunched down into a tight, optimized loop
by the compiler. The result is returned as a NumPy array of type
``numpy.dtype.float64``.
.. code-block:: pycon
>>> x = np.array([[1, 3],[5, 7]])
>>> y = np.array([[2, 4],[6, 8]])
>>> z = 3
>>> result = vectorized_func(x, y, z)
The scalar argument ``z`` is transparently replicated 4 times. The input
arrays ``x`` and ``y`` are automatically converted into the right types (they
are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and
``numpy.dtype.float32``, respectively).
.. note::
Only arithmetic, complex, and POD types passed by value or by ``const &``
reference are vectorized; all other arguments are passed through as-is.
Functions taking rvalue reference arguments cannot be vectorized.
In cases where the computation is too complicated to be reduced to
``vectorize``, it will be necessary to create and access the buffer contents
manually. The following snippet contains a complete example that shows how this
works (the code is somewhat contrived, since it could have been done more
simply using ``vectorize``).
.. code-block:: cpp
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) {
py::buffer_info buf1 = input1.request(), buf2 = input2.request();
if (buf1.ndim != 1 || buf2.ndim != 1)
throw std::runtime_error("Number of dimensions must be one");
if (buf1.size != buf2.size)
throw std::runtime_error("Input shapes must match");
/* No pointer is passed, so NumPy will allocate the buffer */
auto result = py::array_t<double>(buf1.size);
py::buffer_info buf3 = result.request();
double *ptr1 = (double *) buf1.ptr,
*ptr2 = (double *) buf2.ptr,
*ptr3 = (double *) buf3.ptr;
for (size_t idx = 0; idx < buf1.shape[0]; idx++)
ptr3[idx] = ptr1[idx] + ptr2[idx];
return result;
}
PYBIND11_MODULE(test, m) {
m.def("add_arrays", &add_arrays, "Add two NumPy arrays");
}
.. seealso::
The file :file:`tests/test_numpy_vectorize.cpp` contains a complete
example that demonstrates using :func:`vectorize` in more detail.
Direct access
=============
For performance reasons, particularly when dealing with very large arrays, it
is often desirable to directly access array elements without internal checking
of dimensions and bounds on every access when indices are known to be already
valid. To avoid such checks, the ``array`` class and ``array_t<T>`` template
class offer an unchecked proxy object that can be used for this unchecked
access through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods,
where ``N`` gives the required dimensionality of the array:
.. code-block:: cpp
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
for (ssize_t i = 0; i < r.shape(0); i++)
for (ssize_t j = 0; j < r.shape(1); j++)
for (ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
for (ssize_t i = 0; i < r.shape(0); i++)
for (ssize_t j = 0; j < r.shape(1); j++)
for (ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
To obtain the proxy from an ``array`` object, you must specify both the data
type and number of dimensions as template arguments, such as ``auto r =
myarray.mutable_unchecked<float, 2>()``.
If the number of dimensions is not known at compile time, you can omit the
dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or
``arr.unchecked<T>()``). This will give you a proxy object that works in the
same way, but results in less optimizable code and thus a small efficiency
loss in tight loops.
Note that the returned proxy object directly references the array's data, and
only reads its shape, strides, and writeable flag when constructed. You must
take care to ensure that the referenced array is not destroyed or reshaped for
the duration of the returned object, typically by limiting the scope of the
returned instance.
The returned proxy object supports some of the same methods as ``py::array`` so
that it can be used as a drop-in replacement for some existing, index-checked
uses of ``py::array``:
- ``r.ndim()`` returns the number of dimensions
- ``r.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)`` return a pointer to
the ``const T`` or ``T`` data, respectively, at the given indices. The
latter is only available to proxies obtained via ``a.mutable_unchecked()``.
- ``itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``ndim()`` returns the number of dimensions.
- ``shape(n)`` returns the size of dimension ``n``
- ``size()`` returns the total number of elements (i.e. the product of the shapes).
- ``nbytes()`` returns the number of bytes used by the referenced elements
(i.e. ``itemsize()`` times ``size()``).
.. seealso::
The file :file:`tests/test_numpy_array.cpp` contains additional examples
demonstrating the use of this feature.
Ellipsis
========
Python 3 provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
.. code-block:: python
    a = ...  # a NumPy array
b = a[0, ..., 0]
The ``py::ellipsis()`` function can be used to perform the same
operation on the C++ side:
.. code-block:: cpp
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
Python C++ interface
####################
pybind11 exposes Python types and functions using thin C++ wrappers, which
makes it possible to conveniently call Python code from C++ without resorting
to Python's C API.
.. toctree::
:maxdepth: 2
object
numpy
utilities
Python types
############
Available wrappers
==================
All major Python types are available as thin C++ wrapper classes. These
can also be used as function parameters -- see :ref:`python_objects_as_args`.
Available types include :class:`handle`, :class:`object`, :class:`bool_`,
:class:`int_`, :class:`float_`, :class:`str`, :class:`bytes`, :class:`tuple`,
:class:`list`, :class:`dict`, :class:`slice`, :class:`none`, :class:`capsule`,
:class:`iterable`, :class:`iterator`, :class:`function`, :class:`buffer`,
:class:`array`, and :class:`array_t`.
Casting back and forth
======================
In this kind of mixed code, it is often necessary to convert arbitrary C++
types to Python, which can be done using :func:`py::cast`:
.. code-block:: cpp
    MyClass *cls = ...;
py::object obj = py::cast(cls);
The reverse direction uses the following syntax:
.. code-block:: cpp
py::object obj = ...;
MyClass *cls = obj.cast<MyClass *>();
When conversion fails, both directions throw the exception :class:`cast_error`.
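A minimal sketch of guarding against a failed conversion:
.. code-block:: cpp
    try {
        MyClass *cls = obj.cast<MyClass *>();
        // ... use cls ...
    } catch (const py::cast_error &) {
        // obj does not wrap a MyClass instance
    }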
.. _python_libs:
Accessing Python libraries from C++
===================================
It is also possible to import objects defined in the Python standard
library or available in the current Python environment (``sys.path``) and work
with these in C++.
This example obtains a reference to the Python ``Decimal`` class.
.. code-block:: cpp
// Equivalent to "from decimal import Decimal"
py::object Decimal = py::module::import("decimal").attr("Decimal");
.. code-block:: cpp
// Try to import scipy
py::object scipy = py::module::import("scipy");
return scipy.attr("__version__");
.. _calling_python_functions:
Calling Python functions
========================
It is also possible to call Python classes, functions and methods
via ``operator()``.
.. code-block:: cpp
// Construct a Python object of class Decimal
py::object pi = Decimal("3.14159");
.. code-block:: cpp
// Use Python to make our directories
py::object os = py::module::import("os");
py::object makedirs = os.attr("makedirs");
makedirs("/tmp/path/to/somewhere");
One can convert the result obtained from Python to a pure C++ version
if a ``py::class_`` or type conversion is defined.
.. code-block:: cpp
py::function f = <...>;
py::object result_py = f(1234, "hello", some_instance);
MyClass &result = result_py.cast<MyClass>();
.. _calling_python_methods:
Calling Python methods
========================
To call an object's method, one can again use ``.attr`` to obtain access to the
Python method.
.. code-block:: cpp
// Calculate e^π in decimal
py::object exp_pi = pi.attr("exp")();
py::print(py::str(exp_pi));
In the example above ``pi.attr("exp")`` is a *bound method*: it will always call
the method for that same instance of the class. Alternately one can create an
*unbound method* via the Python class (instead of instance) and pass the ``self``
object explicitly, followed by other arguments.
.. code-block:: cpp
py::object decimal_exp = Decimal.attr("exp");
// Compute the e^n for n=0..4
for (int n = 0; n < 5; n++) {
        py::print(decimal_exp(Decimal(n)));
}
Keyword arguments
=================
Keyword arguments are also supported. In Python, there is the usual call syntax:
.. code-block:: python
def f(number, say, to):
... # function code
f(1234, say="hello", to=some_instance) # keyword call in Python
In C++, the same call can be made using:
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
f(1234, "say"_a="hello", "to"_a=some_instance); // keyword call in C++
Unpacking arguments
===================
Unpacking of ``*args`` and ``**kwargs`` is also possible and can be mixed with
other arguments:
.. code-block:: cpp
// * unpacking
py::tuple args = py::make_tuple(1234, "hello", some_instance);
f(*args);
// ** unpacking
py::dict kwargs = py::dict("number"_a=1234, "say"_a="hello", "to"_a=some_instance);
f(**kwargs);
// mixed keywords, * and ** unpacking
    args = py::make_tuple(1234);
    kwargs = py::dict("to"_a=some_instance);
f(*args, "say"_a="hello", **kwargs);
Generalized unpacking according to PEP448_ is also supported:
.. code-block:: cpp
py::dict kwargs1 = py::dict("number"_a=1234);
py::dict kwargs2 = py::dict("to"_a=some_instance);
f(**kwargs1, "say"_a="hello", **kwargs2);
.. seealso::
The file :file:`tests/test_pytypes.cpp` contains a complete
example that demonstrates passing native Python types in more detail. The
file :file:`tests/test_callbacks.cpp` presents a few examples of calling
Python functions from C++, including keywords arguments and unpacking.
.. _PEP448: https://www.python.org/dev/peps/pep-0448/
Functional
##########
The following features must be enabled by including :file:`pybind11/functional.h`.
Callbacks and passing anonymous functions
=========================================
The C++11 standard brought lambda functions and the generic polymorphic
function wrapper ``std::function<>`` to the C++ programming language, which
enable powerful new ways of working with functions. Lambda functions come in
two flavors: stateless lambda functions resemble classic function pointers that
link to an anonymous piece of code, while stateful lambda functions
additionally depend on captured variables that are stored in an anonymous
*lambda closure object*.
Here is a simple example of a C++ function that takes an arbitrary function
(stateful or stateless) with signature ``int -> int`` as an argument and runs
it with the value 10.
.. code-block:: cpp
int func_arg(const std::function<int(int)> &f) {
return f(10);
}
The example below is more involved: it takes a function of signature ``int -> int``
and returns another function of the same kind. The return value is a stateful
lambda function, which stores the value ``f`` in the capture object and adds 1 to
its return value upon execution.
.. code-block:: cpp
std::function<int(int)> func_ret(const std::function<int(int)> &f) {
return [f](int i) {
return f(i) + 1;
};
}
This example demonstrates using Python named parameters in C++ callbacks, which
requires using ``py::cpp_function`` as a wrapper. Usage is similar to defining
methods of classes:
.. code-block:: cpp
py::cpp_function func_cpp() {
return py::cpp_function([](int i) { return i+1; },
py::arg("number"));
}
After including the extra header file :file:`pybind11/functional.h`, it is almost
trivial to generate binding code for all of these functions.
.. code-block:: cpp
#include <pybind11/functional.h>
PYBIND11_MODULE(example, m) {
m.def("func_arg", &func_arg);
m.def("func_ret", &func_ret);
m.def("func_cpp", &func_cpp);
}
The following interactive session shows how to call them from Python.
.. code-block:: pycon
$ python
>>> import example
>>> def square(i):
... return i * i
...
>>> example.func_arg(square)
100L
>>> square_plus_1 = example.func_ret(square)
>>> square_plus_1(4)
17L
    >>> plus_1 = example.func_cpp()
>>> plus_1(number=43)
44L
.. warning::
Keep in mind that passing a function from C++ to Python (or vice versa)
will instantiate a piece of wrapper code that translates function
invocations between the two languages. Naturally, this translation
increases the computational cost of each function call somewhat. A
problematic situation can arise when a function is copied back and forth
between Python and C++ many times in a row, in which case the underlying
wrappers will accumulate correspondingly. The resulting long sequence of
C++ -> Python -> C++ -> ... roundtrips can significantly decrease
performance.
    There is one exception: pybind11 detects cases where a stateless function
(i.e. a function pointer or a lambda function without captured variables)
is passed as an argument to another C++ function exposed in Python. In this
case, there is no overhead. Pybind11 will extract the underlying C++
function pointer from the wrapped function to sidestep a potential C++ ->
Python -> C++ roundtrip. This is demonstrated in :file:`tests/test_callbacks.cpp`.
.. note::
This functionality is very useful when generating bindings for callbacks in
C++ libraries (e.g. GUI libraries, asynchronous networking libraries, etc.).
The file :file:`tests/test_callbacks.cpp` contains a complete example
that demonstrates how to work with callbacks and anonymous functions in
more detail.
Strings, bytes and Unicode conversions
######################################
.. note::
This section discusses string handling in terms of Python 3 strings. For
Python 2.7, replace all occurrences of ``str`` with ``unicode`` and
``bytes`` with ``str``. Python 2.7 users may find it best to use ``from
__future__ import unicode_literals`` to avoid unintentionally using ``str``
instead of ``unicode``.
Passing Python strings to C++
=============================
When a Python ``str`` is passed from Python to a C++ function that accepts
``std::string`` or ``char *`` as arguments, pybind11 will encode the Python
string to UTF-8. All Python ``str`` can be encoded in UTF-8, so this operation
does not fail.
The C++ language is encoding agnostic. It is the responsibility of the
programmer to track encodings. It's often easiest to simply `use UTF-8
everywhere <http://utf8everywhere.org/>`_.
.. code-block:: c++
m.def("utf8_test",
[](const std::string &s) {
cout << "utf-8 is icing on the cake.\n";
cout << s;
}
);
m.def("utf8_charptr",
[](const char *s) {
cout << "My favorite food is\n";
cout << s;
}
);
.. code-block:: python
>>> utf8_test('🎂')
utf-8 is icing on the cake.
🎂
>>> utf8_charptr('🍕')
My favorite food is
🍕
.. note::
Some terminal emulators do not support UTF-8 or emoji fonts and may not
display the example above correctly.
The results are the same whether the C++ function accepts arguments by value or
reference, and whether or not ``const`` is used.
Passing bytes to C++
--------------------
A Python ``bytes`` object will be passed to C++ functions that accept
``std::string`` or ``char*`` *without* conversion. On Python 3, in order to
make a function *only* accept ``bytes`` (and not ``str``), declare it as taking
a ``py::bytes`` argument.
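For example, a minimal sketch (the function name is illustrative):
.. code-block:: c++
    // Accepts only bytes from Python; passing a str raises TypeError
    m.def("bytes_length",
        [](py::bytes b) {
            return py::len(b);
        }
    );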
Returning C++ strings to Python
===============================
When a C++ function returns a ``std::string`` or ``char*`` to a Python caller,
**pybind11 will assume that the string is valid UTF-8** and will decode it to a
native Python ``str``, using the same API as Python uses to perform
``bytes.decode('utf-8')``. If this implicit conversion fails, pybind11 will
raise a ``UnicodeDecodeError``.
.. code-block:: c++
m.def("std_string_return",
[]() {
return std::string("This string needs to be UTF-8 encoded");
}
);
.. code-block:: python
>>> isinstance(example.std_string_return(), str)
True
Because UTF-8 is inclusive of pure ASCII, there is never any issue with
returning a pure ASCII string to Python. If there is any possibility that the
string is not pure ASCII, it is necessary to ensure the encoding is valid
UTF-8.
.. warning::
Implicit conversion assumes that a returned ``char *`` is null-terminated.
If there is no null terminator a buffer overrun will occur.
Explicit conversions
--------------------
If some C++ code constructs a ``std::string`` that is not a UTF-8 string, one
can perform an explicit conversion and return a ``py::str`` object. Explicit
conversion has the same overhead as implicit conversion.
.. code-block:: c++
// This uses the Python C API to convert Latin-1 to Unicode
m.def("str_output",
[]() {
std::string s = "Send your r\xe9sum\xe9 to Alice in HR"; // Latin-1
py::str py_s = PyUnicode_DecodeLatin1(s.data(), s.length());
return py_s;
}
);
.. code-block:: python
>>> str_output()
'Send your résumé to Alice in HR'
The `Python C API
<https://docs.python.org/3/c-api/unicode.html#built-in-codecs>`_ provides
several built-in codecs.
One could also use a third party encoding library such as libiconv to transcode
to UTF-8.
Return C++ strings without conversion
-------------------------------------
If the data in a C++ ``std::string`` does not represent text and should be
returned to Python as ``bytes``, then one can return the data as a
``py::bytes`` object.
.. code-block:: c++
m.def("return_bytes",
[]() {
std::string s("\xba\xd0\xba\xd0"); // Not valid UTF-8
return py::bytes(s); // Return the data without transcoding
}
);
.. code-block:: python
>>> example.return_bytes()
b'\xba\xd0\xba\xd0'
Note the asymmetry: pybind11 will convert ``bytes`` to ``std::string`` without
encoding, but cannot convert ``std::string`` back to ``bytes`` implicitly.
.. code-block:: c++
m.def("asymmetry",
[](std::string s) { // Accepts str or bytes from Python
return s; // Looks harmless, but implicitly converts to str
}
);
.. code-block:: python
>>> isinstance(example.asymmetry(b"have some bytes"), str)
True
>>> example.asymmetry(b"\xba\xd0\xba\xd0") # invalid utf-8 as bytes
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xba in position 0: invalid start byte
Wide character strings
======================
When a Python ``str`` is passed to a C++ function expecting ``std::wstring``,
``wchar_t*``, ``std::u16string`` or ``std::u32string``, the ``str`` will be
encoded to UTF-16 or UTF-32 depending on how the C++ compiler implements each
type, in the platform's native endianness. When strings of these types are
returned, they are assumed to contain valid UTF-16 or UTF-32, and will be
decoded to Python ``str``.
.. code-block:: c++
#define UNICODE
#include <windows.h>
m.def("set_window_text",
[](HWND hwnd, std::wstring s) {
// Call SetWindowText with null-terminated UTF-16 string
::SetWindowText(hwnd, s.c_str());
}
);
m.def("get_window_text",
[](HWND hwnd) {
const int buffer_size = ::GetWindowTextLength(hwnd) + 1;
auto buffer = std::make_unique< wchar_t[] >(buffer_size);
            ::GetWindowText(hwnd, buffer.get(), buffer_size);
std::wstring text(buffer.get());
// wstring will be converted to Python str
return text;
}
);
.. warning::
Wide character strings may not work as described on Python 2.7 or Python
3.3 compiled with ``--enable-unicode=ucs2``.
Strings in multibyte encodings such as Shift-JIS must be transcoded to
UTF-8/16/32 before being returned to Python.
Character literals
==================
C++ functions that accept character literals as input will receive the first
character of a Python ``str`` as their input. If the string is longer than one
Unicode character, trailing characters will be ignored.
When a character literal is returned from C++ (such as a ``char`` or a
``wchar_t``), it will be converted to a ``str`` that represents the single
character.
.. code-block:: c++
m.def("pass_char", [](char c) { return c; });
m.def("pass_wchar", [](wchar_t w) { return w; });
.. code-block:: python
>>> example.pass_char('A')
'A'
While C++ will cast integers to character types (``char c = 0x65;``), pybind11
does not convert Python integers to characters implicitly. The Python function
``chr()`` can be used to convert integers to characters.
.. code-block:: python
>>> example.pass_char(0x65)
TypeError
>>> example.pass_char(chr(0x65))
'A'
If the desire is to work with an 8-bit integer, use ``int8_t`` or ``uint8_t``
as the argument type.
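A minimal sketch (the function name is illustrative): the parameter below
receives a Python integer rather than the first character of a string.
.. code-block:: c++
    m.def("pass_byte", [](uint8_t b) { return b; });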
Grapheme clusters
-----------------
A single grapheme may be represented by two or more Unicode characters. For
example 'é' is usually represented as U+00E9 but can also be expressed as the
combining character sequence U+0065 U+0301 (that is, the letter 'e' followed by
a combining acute accent). The combining character will be lost if the
two-character sequence is passed as an argument, even though it renders as a
single grapheme.
.. code-block:: python
>>> example.pass_wchar('é')
'é'
>>> combining_e_acute = 'e' + '\u0301'
>>> combining_e_acute
'é'
>>> combining_e_acute == 'é'
False
>>> example.pass_wchar(combining_e_acute)
'e'
Normalizing combining characters before passing the character literal to C++
may resolve *some* of these issues:
.. code-block:: python
>>> example.pass_wchar(unicodedata.normalize('NFC', combining_e_acute))
'é'
In some languages (Thai for example), there are `graphemes that cannot be
expressed as a single Unicode code point
<http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries>`_, so there is
no way to capture them in a C++ character type.
C++17 string views
==================
C++17 string views are automatically supported when compiling in C++17 mode.
They follow the same rules for encoding and decoding as the corresponding STL
string type (for example, a ``std::u16string_view`` argument will be passed
UTF-16-encoded data, and a returned ``std::string_view`` will be decoded as
UTF-8).
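A minimal sketch (requires compiling in C++17 mode; the function name is
illustrative):
.. code-block:: c++
    m.def("count_bytes", [](std::string_view s) {
        // the view refers to the UTF-8 encoded contents of the Python str
        return s.size();
    });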
References
==========
* `The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!) <https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/>`_
* `C++ - Using STL Strings at Win32 API Boundaries <https://msdn.microsoft.com/en-ca/magazine/mt238407.aspx>`_
Eigen
#####
`Eigen <http://eigen.tuxfamily.org>`_ is a C++ header-based library for dense and
sparse linear algebra. Due to its popularity and widespread adoption, pybind11
provides transparent conversion and limited mapping support between Eigen and
Scientific Python linear algebra data types.
To enable the built-in Eigen support you must include the optional header file
:file:`pybind11/eigen.h`.
Pass-by-value
=============
When binding a function with ordinary Eigen dense object arguments (for
example, ``Eigen::MatrixXd``), pybind11 will accept any input value that is
already (or convertible to) a ``numpy.ndarray`` with dimensions compatible with
the Eigen type, copy its values into a temporary Eigen variable of the
appropriate type, then call the function with this temporary variable.
Sparse matrices are similarly copied to or from
``scipy.sparse.csr_matrix``/``scipy.sparse.csc_matrix`` objects.
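As a minimal sketch (the function name is illustrative), the following binding
copies the numpy input into ``xs`` and exposes the returned compile-time vector
to Python as a 1-D numpy array:
.. code-block:: cpp
    m.def("row_sums", [](const Eigen::MatrixXd &xs) {
        return Eigen::VectorXd(xs.rowwise().sum());
    });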
Pass-by-reference
=================
One major limitation of the above is that every data conversion implicitly
involves a copy, which can be both expensive (for large matrices) and disallows
binding functions that change their (Matrix) arguments. Pybind11 allows you to
work around this by using Eigen's ``Eigen::Ref<MatrixType>`` class much as you
would when writing a function taking a generic type in Eigen itself (subject to
some limitations discussed below).
When calling a bound function accepting a ``Eigen::Ref<const MatrixType>``
type, pybind11 will attempt to avoid copying by using an ``Eigen::Map`` object
that maps into the source ``numpy.ndarray`` data: this requires both that the
data types are the same (e.g. ``dtype='float64'`` and ``MatrixType::Scalar`` is
``double``); and that the storage is layout compatible. The latter limitation
is discussed in detail in the section below, and requires careful
consideration: by default, numpy matrices and Eigen matrices are *not* storage
compatible.
If the numpy matrix cannot be used as is (either because its types differ, e.g.
passing an array of integers to an Eigen parameter requiring doubles, or
because the storage is incompatible), pybind11 makes a temporary copy and
passes the copy instead.
When a bound function parameter is instead ``Eigen::Ref<MatrixType>`` (note the
lack of ``const``), pybind11 will only allow the function to be called if it
can be mapped *and* if the numpy array is writeable (that is
``a.flags.writeable`` is true). Any access (including modification) made to
the passed variable will be transparently carried out directly on the
``numpy.ndarray``.
This means you can write code such as the following and have it work as
expected:
.. code-block:: cpp
void scale_by_2(Eigen::Ref<Eigen::VectorXd> v) {
v *= 2;
}
Note, however, that you will likely run into limitations due to the different
default storage orders of numpy and Eigen; see the section below on
:ref:`storage_orders` for details on how to bind code that won't run into such
limitations.
.. note::
Passing by reference is not supported for sparse types.
Returning values to Python
==========================
When returning an ordinary dense Eigen matrix type to numpy (e.g.
``Eigen::MatrixXd`` or ``Eigen::RowVectorXf``) pybind11 keeps the matrix and
returns a numpy array that directly references the Eigen matrix: no copy of the
data is performed. The numpy array will have ``array.flags.owndata`` set to
``False`` to indicate that it does not own the data, and the lifetime of the
stored Eigen matrix will be tied to the returned ``array``.
If you bind a function with a non-reference, ``const`` return type (e.g.
``const Eigen::MatrixXd``), the same thing happens except that pybind11 also
sets the numpy array's ``writeable`` flag to false.
If you return an lvalue reference or pointer, the usual pybind11 rules apply,
as dictated by the binding function's return value policy (see the
documentation on :ref:`return_value_policies` for full details). That means,
without an explicit return value policy, lvalue references will be copied and
pointers will be managed by pybind11. In order to avoid copying, you should
explicitly specify an appropriate return value policy, as in the following
example:
.. code-block:: cpp
class MyClass {
Eigen::MatrixXd big_mat = Eigen::MatrixXd::Zero(10000, 10000);
public:
Eigen::MatrixXd &getMatrix() { return big_mat; }
const Eigen::MatrixXd &viewMatrix() { return big_mat; }
};
// Later, in binding code:
py::class_<MyClass>(m, "MyClass")
.def(py::init<>())
.def("copy_matrix", &MyClass::getMatrix) // Makes a copy!
.def("get_matrix", &MyClass::getMatrix, py::return_value_policy::reference_internal)
.def("view_matrix", &MyClass::viewMatrix, py::return_value_policy::reference_internal)
;
.. code-block:: python
a = MyClass()
m = a.get_matrix() # flags.writeable = True, flags.owndata = False
v = a.view_matrix() # flags.writeable = False, flags.owndata = False
c = a.copy_matrix() # flags.writeable = True, flags.owndata = True
# m[5,6] and v[5,6] refer to the same element, c[5,6] does not.
Note in this example that ``py::return_value_policy::reference_internal`` is
used to tie the life of the MyClass object to the life of the returned arrays.
You may also return an ``Eigen::Ref``, ``Eigen::Map`` or other map-like Eigen
object (for example, the return value of ``matrix.block()`` and related
methods) that map into a dense Eigen type. When doing so, the default
behaviour of pybind11 is to simply reference the returned data: you must take
care to ensure that this data remains valid! You may ask pybind11 to
explicitly *copy* such a return value by using the
``py::return_value_policy::copy`` policy when binding the function. You may
also use ``py::return_value_policy::reference_internal`` or a
``py::keep_alive`` to ensure the data stays valid as long as the returned numpy
array does.
When returning such a reference or map, pybind11 additionally respects the
readonly-status of the returned value, marking the numpy array as non-writeable
if the reference or map was itself read-only.
.. note::
Sparse types are always copied when returned.
.. _storage_orders:
Storage orders
==============
Passing arguments via ``Eigen::Ref`` has some limitations that you must be
aware of in order to effectively pass matrices by reference. First and
foremost is that the default ``Eigen::Ref<MatrixType>`` class requires
contiguous storage along columns (for column-major types, the default in Eigen)
or rows if ``MatrixType`` is specifically an ``Eigen::RowMajor`` storage type.
The former, Eigen's default, is incompatible with ``numpy``'s default row-major
storage, and so you will not be able to pass numpy arrays to Eigen by reference
without making one of two changes.
(Note that this does not apply to vectors (or column or row matrices): for such
types the "row-major" and "column-major" distinction is meaningless).
The first approach is to change the use of ``Eigen::Ref<MatrixType>`` to the
more general ``Eigen::Ref<MatrixType, 0, Eigen::Stride<Eigen::Dynamic,
Eigen::Dynamic>>`` (or similar type with a fully dynamic stride type in the
third template argument). Since this is a rather cumbersome type, pybind11
provides a ``py::EigenDRef<MatrixType>`` type alias for your convenience (along
with EigenDMap for the equivalent Map, and EigenDStride for just the stride
type).
This type allows Eigen to map into any arbitrary storage order. This is not
the default in Eigen for performance reasons: contiguous storage allows
vectorization that cannot be done when storage is not known to be contiguous at
compile time. The default ``Eigen::Ref`` stride type allows non-contiguous
storage along the outer dimension (that is, the rows of a column-major matrix
or columns of a row-major matrix), but not along the inner dimension.
This type, however, has the added benefit of also being able to map numpy array
slices. For example, the following (contrived) example uses Eigen with a numpy
slice to multiply by 2 all coefficients that are both on even rows (0, 2, 4,
...) and in columns 2, 5, or 8:
.. code-block:: cpp
m.def("scale", [](py::EigenDRef<Eigen::MatrixXd> m, double c) { m *= c; });
.. code-block:: python
    # myarray = np.array(...)
    scale(myarray[0::2, 2:9:3], 2.0)
The second approach to avoid copying is more intrusive: rearranging the
underlying data types to not run into the non-contiguous storage problem in the
first place. In particular, that means using matrices with ``Eigen::RowMajor``
storage, where appropriate, such as:
.. code-block:: cpp
using RowMatrixXd = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
// Use RowMatrixXd instead of MatrixXd
Now bound functions accepting ``Eigen::Ref<RowMatrixXd>`` arguments will be
callable with numpy's (default) arrays without involving a copy.
You can, alternatively, change the storage order that numpy arrays use by
adding the ``order='F'`` option when creating an array:
.. code-block:: python
myarray = np.array(source, order='F')
Such an object will be passable to a bound function accepting an
``Eigen::Ref<MatrixXd>`` (or similar column-major Eigen type).
One major caveat with this approach, however, is that it is not entirely as
easy as simply flipping all Eigen or numpy usage from one to the other: some
operations may alter the storage order of a numpy array. For example, ``a2 =
array.transpose()`` results in ``a2`` being a view of ``array`` that references
the same data, but in the opposite storage order!
While this approach allows fully optimized vectorized calculations in Eigen, it
cannot be used with array slices, unlike the first approach.
When *returning* a matrix to Python (either a regular matrix, a reference via
``Eigen::Ref<>``, or a map/block into a matrix), no special storage
consideration is required: the created numpy array will have the required
stride that allows numpy to properly interpret the array, whatever its storage
order.
Failing rather than copying
===========================
The default behaviour when binding ``Eigen::Ref<const MatrixType>`` Eigen
references is to copy matrix values when passed a numpy array that does not
conform to the element type of ``MatrixType`` or does not have a compatible
stride layout. If you want to explicitly avoid copying in such a case, you
should bind arguments using the ``py::arg().noconvert()`` annotation (as
described in the :ref:`nonconverting_arguments` documentation).
The following example shows an example of arguments that don't allow data
copying to take place:
.. code-block:: cpp
// The method and function to be bound:
class MyClass {
// ...
double some_method(const Eigen::Ref<const MatrixXd> &matrix) { /* ... */ }
};
float some_function(const Eigen::Ref<const MatrixXf> &big,
const Eigen::Ref<const MatrixXf> &small) {
// ...
}
// The associated binding code:
using namespace pybind11::literals; // for "arg"_a
py::class_<MyClass>(m, "MyClass")
// ... other class definitions
.def("some_method", &MyClass::some_method, py::arg().noconvert());
m.def("some_function", &some_function,
"big"_a.noconvert(), // <- Don't allow copying for this arg
"small"_a // <- This one can be copied if needed
);
With the above binding code, attempting to call the ``some_method(m)``
method on a ``MyClass`` object, or attempting to call ``some_function(m, m2)``
will raise a ``RuntimeError`` rather than making a temporary copy of the array.
It will, however, allow the ``m2`` argument to be copied into a temporary if
necessary.
Note that explicitly specifying ``.noconvert()`` is not required for *mutable*
Eigen references (e.g. ``Eigen::Ref<MatrixXd>`` without ``const`` on the
``MatrixXd``): mutable references will never be called with a temporary copy.
Vectors versus column/row matrices
==================================
Eigen and numpy have fundamentally different notions of a vector. In Eigen, a
vector is simply a matrix with the number of columns or rows set to 1 at
compile time (for a column vector or row vector, respectively). Numpy, in
contrast, has comparable 2-dimensional 1xN and Nx1 arrays, but *also* has
1-dimensional arrays of size N.
When passing a 2-dimensional 1xN or Nx1 array to Eigen, the Eigen type must
have matching dimensions: That is, you cannot pass a 2-dimensional Nx1 numpy
array to an Eigen value expecting a row vector, or a 1xN numpy array as a
column vector argument.
On the other hand, pybind11 allows you to pass 1-dimensional arrays of length N
as Eigen parameters. If the Eigen type can hold a column vector of length N it
will be passed as such a column vector. If not, but the Eigen type constraints
will accept a row vector, it will be passed as a row vector. (The column
vector takes precedence when both are supported, for example, when passing a
1D numpy array to a MatrixXd argument). Note that the type need not be
explicitly a vector: it is permitted to pass a 1D numpy array of size 5 to an
Eigen ``Matrix<double, Dynamic, 5>``: you would end up with a 1x5 Eigen matrix.
Passing the same to an ``Eigen::MatrixXd`` would result in a 5x1 Eigen matrix.
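A minimal sketch (the function names are illustrative): the same 1-dimensional
numpy array is accepted by both orientations.
.. code-block:: cpp
    m.def("norm_col", [](const Eigen::VectorXd &v)    { return v.norm(); });
    m.def("norm_row", [](const Eigen::RowVectorXd &v) { return v.norm(); });
    // In Python, norm_col(np.array([1., 2., 3.])) and
    // norm_row(np.array([1., 2., 3.])) both succeed.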
When returning an Eigen vector to numpy, the conversion is ambiguous: a row
vector of length 4 could be returned as either a 1D array of length 4, or as a
2D array of size 1x4. When encountering such a situation, pybind11 compromises
by considering the returned Eigen type: if it is a compile-time vector--that
is, the type has either the number of rows or columns set to 1 at compile
time--pybind11 converts to a 1D numpy array when returning the value. For
instances that are a vector only at run-time (e.g. ``MatrixXd``,
``Matrix<float, Dynamic, 4>``), pybind11 returns the vector as a 2D array to
numpy. If this isn't what you want, you can use ``array.reshape(...)`` to get
a view of the same data in the desired dimensions.
.. seealso::
The file :file:`tests/test_eigen.cpp` contains a complete example that
shows how to pass Eigen sparse and dense data types in more detail.
Chrono
======
When including the additional header file :file:`pybind11/chrono.h`, conversions
from C++11 chrono datatypes to Python datetime objects are automatically enabled.
This header also enables conversions of python floats (often from sources such
as ``time.monotonic()``, ``time.perf_counter()`` and ``time.process_time()``)
into durations.
An overview of clocks in C++11
------------------------------
A point of confusion when using these conversions is the differences between
clocks provided in C++11. There are three clock types defined by the C++11
standard and users can define their own if needed. Each of these clocks has
different properties, and converting to and from Python will give different
results.
The first clock defined by the standard is ``std::chrono::system_clock``. This
clock measures the current date and time. However, this clock changes with
updates to the operating system time. For example, if your time is synchronised
with a time server this clock will change. This makes this clock a poor choice
for timing purposes but good for measuring the wall time.
The second clock defined in the standard is ``std::chrono::steady_clock``.
This clock ticks at a steady rate and is never adjusted. This makes it excellent
for timing purposes, however the value in this clock does not correspond to the
current date and time. Often this clock will be the amount of time your system
has been on, although it does not have to be. This clock will never be the same
clock as the system clock, since the system clock can change but steady clocks
cannot.
The third clock defined in the standard is ``std::chrono::high_resolution_clock``.
This clock is the clock that has the highest resolution out of the clocks in the
system. It is normally a typedef to either the system clock or the steady clock
but can be its own independent clock. This is important when using these
conversions, as the types you get in Python for this clock might be different
depending on the system.
If it is a typedef of the system clock, python will get datetime objects, but if
it is a different clock they will be timedelta objects.
Provided conversions
--------------------
.. rubric:: C++ to Python
- ``std::chrono::system_clock::time_point`` → ``datetime.datetime``
System clock times are converted to python datetime instances. They are
in the local timezone, but do not have any timezone information attached
to them (they are naive datetime objects).
- ``std::chrono::duration`` → ``datetime.timedelta``
    Durations are converted to timedeltas; any precision in the duration
    finer than microseconds is lost by rounding towards zero.
- ``std::chrono::[other_clocks]::time_point`` → ``datetime.timedelta``
Any clock time that is not the system clock is converted to a time delta.
    This timedelta measures the time from the clock's epoch to now.
.. rubric:: Python to C++
- ``datetime.datetime`` or ``datetime.date`` or ``datetime.time`` → ``std::chrono::system_clock::time_point``
Date/time objects are converted into system clock timepoints. Any
timezone information is ignored and the type is treated as a naive
object.
- ``datetime.timedelta`` → ``std::chrono::duration``
    Time deltas are converted into durations with microsecond precision.
- ``datetime.timedelta`` → ``std::chrono::[other_clocks]::time_point``
Time deltas that are converted into clock timepoints are treated as
    the amount of time from the start of the clock's epoch.
- ``float`` → ``std::chrono::duration``
    Floats that are passed to C++ as durations are interpreted as a number of
seconds. These will be converted to the duration using ``duration_cast``
from the float.
- ``float`` → ``std::chrono::[other_clocks]::time_point``
Floats that are passed to C++ as time points will be interpreted as the
    number of seconds from the start of the clock's epoch.
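A minimal sketch of binding code that exercises these conversions (the function
names are illustrative):
.. code-block:: cpp
    #include <pybind11/chrono.h>
    // accepts a datetime.datetime and returns one an hour later
    m.def("one_hour_later", [](std::chrono::system_clock::time_point t) {
        return t + std::chrono::hours(1);
    });
    // accepts a datetime.timedelta or a float number of seconds
    m.def("to_seconds", [](std::chrono::duration<double> d) {
        return d.count();
    });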
Type conversions
################
Apart from enabling cross-language function calls, a fundamental problem
that a binding tool like pybind11 must address is to provide access to
native Python types in C++ and vice versa. There are three fundamentally
different ways to do this—which approach is preferable for a particular type
depends on the situation at hand.
1. Use a native C++ type everywhere. In this case, the type must be wrapped
using pybind11-generated bindings so that Python can interact with it.
2. Use a native Python type everywhere. It will need to be wrapped so that
C++ functions can interact with it.
3. Use a native C++ type on the C++ side and a native Python type on the
Python side. pybind11 refers to this as a *type conversion*.
Type conversions are the most "natural" option in the sense that native
(non-wrapped) types are used everywhere. The main downside is that a copy
of the data must be made on every Python ↔ C++ transition: this is
needed since the C++ and Python versions of the same type generally won't
have the same memory layout.
pybind11 can perform many kinds of conversions automatically. An overview
is provided in the table ":ref:`conversion_table`".
The following subsections discuss the differences between these options in more
detail. The main focus in this section is on type conversions, which represent
the last case of the above list.
.. toctree::
:maxdepth: 1
overview
strings
stl
functional
chrono
eigen
custom
STL containers
##############
Automatic conversion
====================
When including the additional header file :file:`pybind11/stl.h`, conversions
between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``,
``std::set<>``/``std::unordered_set<>``, and
``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and
``dict`` data structures are automatically enabled. The types ``std::pair<>``
and ``std::tuple<>`` are already supported out of the box with just the core
:file:`pybind11/pybind11.h` header.
The major downside of these implicit conversions is that containers must be
converted (i.e. copied) on every Python->C++ and C++->Python transition, which
can have implications on the program semantics and performance. Please read the
next sections for more details and alternative approaches that avoid this.
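As a minimal sketch (the function name is illustrative), the Python ``list`` of
strings passed to the function below is copied into a
``std::vector<std::string>`` on every call:
.. code-block:: cpp
    #include <pybind11/stl.h>
    m.def("total_length", [](const std::vector<std::string> &words) {
        size_t n = 0;
        for (const auto &w : words)
            n += w.size();
        return n;
    });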
.. note::
Arbitrary nesting of any of these types is possible.
.. seealso::
The file :file:`tests/test_stl.cpp` contains a complete
example that demonstrates how to pass STL data types in more detail.
.. _cpp17_container_casters:
C++17 library containers
========================
The :file:`pybind11/stl.h` header also includes support for ``std::optional<>``
and ``std::variant<>``. These require a C++17 compiler and standard library.
In C++14 mode, ``std::experimental::optional<>`` is supported if available.
Various versions of these containers also exist for C++11 (e.g. in Boost).
pybind11 provides an easy way to specialize the ``type_caster`` for such
types:
.. code-block:: cpp
// `boost::optional` as an example -- can be any `std::optional`-like container
namespace pybind11 { namespace detail {
template <typename T>
struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};
}}
The above should be placed in a header file and included in all translation units
where automatic conversion is needed. Similarly, a specialization can be provided
for custom variant types:
.. code-block:: cpp
// `boost::variant` as an example -- can be any `std::variant`-like container
namespace pybind11 { namespace detail {
template <typename... Ts>
struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};
// Specifies the function used to visit the variant -- `apply_visitor` instead of `visit`
template <>
struct visit_helper<boost::variant> {
template <typename... Args>
static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {
return boost::apply_visitor(args...);
}
};
}} // namespace pybind11::detail
The ``visit_helper`` specialization is not required if your ``name::variant`` provides
a ``name::visit()`` function. For any other function name, the specialization must be
included to tell pybind11 how to visit the variant.
.. note::
pybind11 only supports the modern implementation of ``boost::variant``
which makes use of variadic templates. This requires Boost 1.56 or newer.
Additionally, on Windows, MSVC 2017 is required because ``boost::variant``
falls back to the old non-variadic implementation on MSVC 2015.
.. _opaque:
Making opaque types
===================
pybind11 heavily relies on a template matching mechanism to convert parameters
and return values that are constructed from STL data types such as vectors,
linked lists, hash tables, etc. This even works in a recursive manner, for
instance to deal with lists of hash maps of pairs of elementary and custom
types, etc.
However, a fundamental limitation of this approach is that internal conversions
between Python and C++ types involve a copy operation that prevents
pass-by-reference semantics. What does this mean?
Suppose we bind the following function
.. code-block:: cpp
void append_1(std::vector<int> &v) {
v.push_back(1);
}
and call it from Python, the following happens:
.. code-block:: pycon
>>> v = [5, 6]
>>> append_1(v)
>>> print(v)
[5, 6]
As you can see, when passing STL data structures by reference, modifications
are not propagated back to the Python side. A similar situation arises when
exposing STL data structures using the ``def_readwrite`` or ``def_readonly``
functions:
.. code-block:: cpp
/* ... definition ... */
class MyClass {
std::vector<int> contents;
};
/* ... binding code ... */
py::class_<MyClass>(m, "MyClass")
.def(py::init<>())
.def_readwrite("contents", &MyClass::contents);
In this case, properties can be read and written in their entirety. However, an
``append`` operation involving such a list type has no effect:
.. code-block:: pycon
>>> m = MyClass()
>>> m.contents = [5, 6]
>>> print(m.contents)
[5, 6]
>>> m.contents.append(7)
>>> print(m.contents)
[5, 6]
Finally, the involved copy operations can be costly when dealing with very
large lists. To deal with all of the above situations, pybind11 provides a
macro named ``PYBIND11_MAKE_OPAQUE(T)`` that disables the template-based
conversion machinery of types, thus rendering them *opaque*. The contents of
opaque objects are never inspected or extracted, hence they *can* be passed by
reference. For instance, to turn ``std::vector<int>`` into an opaque type, add
the declaration
.. code-block:: cpp
PYBIND11_MAKE_OPAQUE(std::vector<int>);
before any binding code (e.g. invocations to ``class_::def()``, etc.). This
macro must be specified at the top level (and outside of any namespaces), since
it instantiates a partial template overload. If your binding code consists of
multiple compilation units, it must be present in every file (typically via a
common header) preceding any usage of ``std::vector<int>``. Opaque types must
also have a corresponding ``class_`` declaration to associate them with a name
in Python, and to define a set of available operations, e.g.:
.. code-block:: cpp
py::class_<std::vector<int>>(m, "IntVector")
.def(py::init<>())
.def("clear", &std::vector<int>::clear)
.def("pop_back", &std::vector<int>::pop_back)
.def("__len__", [](const std::vector<int> &v) { return v.size(); })
.def("__iter__", [](std::vector<int> &v) {
return py::make_iterator(v.begin(), v.end());
}, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */
// ....
.. seealso::
The file :file:`tests/test_opaque_types.cpp` contains a complete
example that demonstrates how to create and expose opaque types using
pybind11 in more detail.
.. _stl_bind:
Binding STL containers
======================
The ability to expose STL containers as native Python objects is a fairly
common request, hence pybind11 also provides an optional header file named
:file:`pybind11/stl_bind.h` that does exactly this. The mapped containers try
to match the behavior of their native Python counterparts as much as possible.
The following example showcases usage of :file:`pybind11/stl_bind.h`:
.. code-block:: cpp
// Don't forget this
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(std::vector<int>);
PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);
// ...
// later in binding code:
py::bind_vector<std::vector<int>>(m, "VectorInt");
py::bind_map<std::map<std::string, double>>(m, "MapStringDouble");
When binding STL containers, pybind11 considers the types of the container's
elements to decide whether the container should be confined to the local module
(via the :ref:`module_local` feature). If the container element types are
anything other than already-bound custom types bound without
``py::module_local()``, the container binding will have ``py::module_local()``
applied. This includes converting types such as numeric types, strings, Eigen
types, and types that have not yet been bound at the time of the STL container
binding. This module-local binding is designed to avoid potential conflicts
between module bindings (for example, from two separate modules each attempting
to bind ``std::vector<int>`` as a Python type).
It is possible to override this behavior to force a definition to be either
module-local or global. To do so, you can pass the attributes
``py::module_local()`` (to make the binding module-local) or
``py::module_local(false)`` (to make the binding global) into the
``py::bind_vector`` or ``py::bind_map`` arguments:
.. code-block:: cpp
py::bind_vector<std::vector<int>>(m, "VectorInt", py::module_local(false));
Note, however, that such a global binding would make it impossible to load this
module at the same time as any other pybind module that also attempts to bind
the same container type (``std::vector<int>`` in the above example).
See :ref:`module_local` for more details on module-local bindings.
.. seealso::
The file :file:`tests/test_stl_binders.cpp` shows how to use the
convenience STL container wrappers.
Custom type casters
===================
In very rare cases, applications may require custom type casters that cannot be
expressed using the abstractions provided by pybind11, thus requiring raw
Python C API calls. This is fairly advanced usage and should only be pursued by
experts who are familiar with the intricacies of Python reference counting.
The following snippets demonstrate how this works for a very simple ``inty``
type that should be convertible from Python types that provide an
``__int__(self)`` method.
.. code-block:: cpp
struct inty { long long_value; };
void print(inty s) {
std::cout << s.long_value << std::endl;
}
The following Python snippet demonstrates the intended usage from the Python side:
.. code-block:: python
class A:
def __int__(self):
return 123
from example import print
print(A())
To register the necessary conversion routines, it is necessary to add
a partial overload to the ``pybind11::detail::type_caster<T>`` template.
Although this is an implementation detail, adding partial overloads to this
type is explicitly allowed.
.. code-block:: cpp
namespace pybind11 { namespace detail {
template <> struct type_caster<inty> {
public:
/**
* This macro establishes the name 'inty' in
* function signatures and declares a local variable
* 'value' of type inty
*/
PYBIND11_TYPE_CASTER(inty, _("inty"));
/**
* Conversion part 1 (Python->C++): convert a PyObject into an inty
* instance or return false upon failure. The second argument
* indicates whether implicit conversions should be applied.
*/
bool load(handle src, bool) {
/* Extract PyObject from handle */
PyObject *source = src.ptr();
/* Try converting into a Python integer value */
PyObject *tmp = PyNumber_Long(source);
if (!tmp)
return false;
/* Now try to convert into a C++ int */
value.long_value = PyLong_AsLong(tmp);
Py_DECREF(tmp);
/* Ensure return code was OK (to avoid out-of-range errors etc) */
return !(value.long_value == -1 && PyErr_Occurred());
}
/**
* Conversion part 2 (C++ -> Python): convert an inty instance into
* a Python object. The second and third arguments are used to
* indicate the return value policy and parent object (for
* ``return_value_policy::reference_internal``) and are generally
* ignored by implicit casters.
*/
static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {
return PyLong_FromLong(src.long_value);
}
};
}} // namespace pybind11::detail
.. note::
A ``type_caster<T>`` defined with ``PYBIND11_TYPE_CASTER(T, ...)`` requires
that ``T`` is default-constructible (``value`` is first default constructed
and then ``load()`` assigns to it).
.. warning::
When using custom type casters, it's important to declare them consistently
in every compilation unit of the Python extension module. Otherwise,
undefined behavior can ensue.
Overview
########
.. rubric:: 1. Native type in C++, wrapper in Python
Exposing a custom C++ type using :class:`py::class_` was covered in detail
in the :doc:`/classes` section. There, the underlying data structure is
always the original C++ class while the :class:`py::class_` wrapper provides
a Python interface. Internally, when an object like this is sent from C++ to
Python, pybind11 will just add the outer wrapper layer over the native C++
object. Getting it back from Python is just a matter of peeling off the
wrapper.
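A minimal sketch of this approach, reusing the ``Pet`` example from the
:doc:`/classes` chapter (the module name ``example`` is illustrative):

.. code-block:: cpp

    #include <pybind11/pybind11.h>

    #include <string>

    namespace py = pybind11;

    struct Pet {
        Pet(const std::string &name) : name(name) {}
        std::string name;
    };

    PYBIND11_MODULE(example, m) {
        // Python sees a wrapper class "Pet"; the underlying data structure is
        // always the native C++ Pet instance, so no per-call copies are made.
        py::class_<Pet>(m, "Pet")
            .def(py::init<const std::string &>())
            .def_readwrite("name", &Pet::name);
    }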
.. rubric:: 2. Wrapper in C++, native type in Python
This is the exact opposite situation. Now, we have a type which is native to
Python, like a ``tuple`` or a ``list``. One way to get this data into C++ is
with the :class:`py::object` family of wrappers. These are explained in more
detail in the :doc:`/advanced/pycpp/object` section. We'll just give a quick
example here:
.. code-block:: cpp
void print_list(py::list my_list) {
for (auto item : my_list)
std::cout << item << " ";
}
.. code-block:: pycon
>>> print_list([1, 2, 3])
1 2 3
The Python ``list`` is not converted in any way -- it's just wrapped in a C++
:class:`py::list` class. At its core it's still a Python object. Copying a
:class:`py::list` will do the usual reference-counting like in Python.
Returning the object to Python will just remove the thin wrapper.
.. rubric:: 3. Converting between native C++ and Python types
In the previous two cases we had a native type in one language and a wrapper in
the other. Now, we have native types on both sides and we convert between them.
.. code-block:: cpp
void print_vector(const std::vector<int> &v) {
for (auto item : v)
std::cout << item << "\n";
}
.. code-block:: pycon
>>> print_vector([1, 2, 3])
1 2 3
In this case, pybind11 will construct a new ``std::vector<int>`` and copy each
element from the Python ``list``. The newly constructed object will be passed
to ``print_vector``. The same thing happens in the other direction: a new
``list`` is made to match the value returned from C++.
Lots of these conversions are supported out of the box, as shown in the table
below. They are very convenient, but keep in mind that these conversions are
fundamentally based on copying data. This is perfectly fine for small immutable
types but it may become quite expensive for large data structures. This can be
avoided by overriding the automatic conversion with a custom wrapper (i.e. the
above-mentioned approach 1). This requires some manual effort and more details
are available in the :ref:`opaque` section.
.. _conversion_table:
List of all builtin conversions
-------------------------------
The following basic data types are supported out of the box (some may require
an additional extension header to be included). To pass other data structures
as arguments and return values, refer to the section on binding :ref:`classes`.
+------------------------------------+---------------------------+-------------------------------+
| Data type | Description | Header file |
+====================================+===========================+===============================+
| ``int8_t``, ``uint8_t`` | 8-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int16_t``, ``uint16_t`` | 16-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int32_t``, ``uint32_t`` | 32-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int64_t``, ``uint64_t`` | 64-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``ssize_t``, ``size_t`` | Platform-dependent size | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``float``, ``double`` | Floating point types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``bool`` | Two-state Boolean type | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char`` | Character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char16_t`` | UTF-16 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char32_t`` | UTF-32 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``wchar_t`` | Wide character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char *`` | UTF-8 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char16_t *`` | UTF-16 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char32_t *`` | UTF-32 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const wchar_t *`` | Wide string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::string`` | STL dynamic UTF-8 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::u16string`` | STL dynamic UTF-16 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::u32string`` | STL dynamic UTF-32 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::wstring`` | STL dynamic wide string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::string_view``, | STL C++17 string views | :file:`pybind11/pybind11.h` |
| ``std::u16string_view``, etc. | | |
+------------------------------------+---------------------------+-------------------------------+
| ``std::pair<T1, T2>`` | Pair of two custom types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::tuple<...>`` | Arbitrary tuple of types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::reference_wrapper<...>`` | Reference type wrapper | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::complex<T>`` | Complex numbers | :file:`pybind11/complex.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::array<T, Size>`` | STL static array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::vector<T>`` | STL dynamic array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::deque<T>`` | STL double-ended queue | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::valarray<T>`` | STL value array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::list<T>`` | STL linked list | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::map<T1, T2>`` | STL ordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::unordered_map<T1, T2>`` | STL unordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::set<T>`` | STL ordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::unordered_set<T>`` | STL unordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::optional<T>`` | STL optional type (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::experimental::optional<T>`` | STL optional type (exp.) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::variant<...>`` | Type-safe union (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::function<...>`` | STL polymorphic function | :file:`pybind11/functional.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::chrono::duration<...>`` | STL time duration | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::chrono::time_point<...>`` | STL date/time | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::Matrix<...>`` | Eigen: dense matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::Map<...>`` | Eigen: mapped memory | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::SparseMatrix<...>`` | Eigen: sparse matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
import pytest
from pybind11_tests import copy_move_policies as m
def test_lacking_copy_ctor():
with pytest.raises(RuntimeError) as excinfo:
m.lacking_copy_ctor.get_one()
assert "is non-copyable!" in str(excinfo.value)
def test_lacking_move_ctor():
with pytest.raises(RuntimeError) as excinfo:
m.lacking_move_ctor.get_one()
assert "is neither movable nor copyable!" in str(excinfo.value)
def test_move_and_copy_casts():
"""Cast some values in C++ via custom type casters and count the number of moves/copies."""
cstats = m.move_and_copy_cstats()
c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
# The type move constructions/assignments below each get incremented: the move assignment comes
# from the type_caster load; the move construction happens when extracting that via a cast or
# loading into an argument.
assert m.move_and_copy_casts(3) == 18
assert c_m.copy_assignments + c_m.copy_constructions == 0
assert c_m.move_assignments == 2
assert c_m.move_constructions >= 2
assert c_mc.alive() == 0
assert c_mc.copy_assignments + c_mc.copy_constructions == 0
assert c_mc.move_assignments == 2
assert c_mc.move_constructions >= 2
assert c_c.alive() == 0
assert c_c.copy_assignments == 2
assert c_c.copy_constructions >= 2
assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_move_and_copy_loads():
"""Call some functions that load arguments via custom type casters and count the number of
moves/copies."""
cstats = m.move_and_copy_cstats()
c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
assert m.move_only(10) == 10 # 1 move, c_m
assert m.move_or_copy(11) == 11 # 1 move, c_mc
assert m.copy_only(12) == 12 # 1 copy, c_c
assert m.move_pair((13, 14)) == 27 # 1 c_m move, 1 c_mc move
assert m.move_tuple((15, 16, 17)) == 48 # 2 c_m moves, 1 c_mc move
assert m.copy_tuple((18, 19)) == 37 # 2 c_c copies
# Direct constructions: 2 c_m moves, 2 c_mc moves, 1 c_c copy
# Extra moves/copies when moving pairs/tuples: 3 c_m, 3 c_mc, 2 c_c
assert m.move_copy_nested((1, ((2, 3, (4,)), 5))) == 15
assert c_m.copy_assignments + c_m.copy_constructions == 0
assert c_m.move_assignments == 6
assert c_m.move_constructions == 9
assert c_mc.copy_assignments + c_mc.copy_constructions == 0
assert c_mc.move_assignments == 5
assert c_mc.move_constructions == 8
assert c_c.copy_assignments == 4
assert c_c.copy_constructions == 6
assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
@pytest.mark.skipif(not m.has_optional, reason='no <optional>')
def test_move_and_copy_load_optional():
"""Tests move/copy loads of std::optional arguments"""
cstats = m.move_and_copy_cstats()
c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
# The extra move/copy constructions below come from the std::optional move (which has to move
# its arguments):
assert m.move_optional(10) == 10 # c_m: 1 move assign, 2 move construct
assert m.move_or_copy_optional(11) == 11 # c_mc: 1 move assign, 2 move construct
assert m.copy_optional(12) == 12 # c_c: 1 copy assign, 2 copy construct
# 1 move assign + move construct moves each of c_m, c_mc, 1 c_c copy
# +1 move/copy construct each from moving the tuple
# +1 move/copy construct each from moving the optional (which moves the tuple again)
assert m.move_optional_tuple((3, 4, 5)) == 12
assert c_m.copy_assignments + c_m.copy_constructions == 0
assert c_m.move_assignments == 2
assert c_m.move_constructions == 5
assert c_mc.copy_assignments + c_mc.copy_constructions == 0
assert c_mc.move_assignments == 2
assert c_mc.move_constructions == 5
assert c_c.copy_assignments == 2
assert c_c.copy_constructions == 5
assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_private_op_new():
"""An object with a private `operator new` cannot be returned by value"""
with pytest.raises(RuntimeError) as excinfo:
m.private_op_new_value()
assert "is neither movable nor copyable" in str(excinfo.value)
assert m.private_op_new_reference().value == 1
def test_move_fallback():
"""#389: rvp::move should fall-through to copy on non-movable objects"""
m2 = m.get_moveissue2(2)
assert m2.value == 2
m1 = m.get_moveissue1(1)
assert m1.value == 1
import pytest
from pybind11_tests import ConstructorStats
pytestmark = pytest.requires_eigen_and_numpy
with pytest.suppress(ImportError):
from pybind11_tests import eigen as m
import numpy as np
ref = np.array([[ 0., 3, 0, 0, 0, 11],
[22, 0, 0, 0, 17, 11],
[ 7, 5, 0, 1, 0, 11],
[ 0, 0, 0, 0, 0, 11],
[ 0, 0, 14, 0, 8, 11]])
def assert_equal_ref(mat):
np.testing.assert_array_equal(mat, ref)
def assert_sparse_equal_ref(sparse_mat):
assert_equal_ref(sparse_mat.toarray())
def test_fixed():
assert_equal_ref(m.fixed_c())
assert_equal_ref(m.fixed_r())
assert_equal_ref(m.fixed_copy_r(m.fixed_r()))
assert_equal_ref(m.fixed_copy_c(m.fixed_c()))
assert_equal_ref(m.fixed_copy_r(m.fixed_c()))
assert_equal_ref(m.fixed_copy_c(m.fixed_r()))
def test_dense():
assert_equal_ref(m.dense_r())
assert_equal_ref(m.dense_c())
assert_equal_ref(m.dense_copy_r(m.dense_r()))
assert_equal_ref(m.dense_copy_c(m.dense_c()))
assert_equal_ref(m.dense_copy_r(m.dense_c()))
assert_equal_ref(m.dense_copy_c(m.dense_r()))
def test_partially_fixed():
ref2 = np.array([[0., 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, 1]), ref2[:, [1]])
np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2[0, :]), ref2[[0], :])
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
np.testing.assert_array_equal(
m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, 1]), ref2[:, [1]])
np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2[0, :]), ref2[[0], :])
np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
np.testing.assert_array_equal(
m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
# TypeError should be raised for a shape mismatch
functions = [m.partial_copy_four_rm_r, m.partial_copy_four_rm_c,
m.partial_copy_four_cm_r, m.partial_copy_four_cm_c]
matrix_with_wrong_shape = [[1, 2],
[3, 4]]
for f in functions:
with pytest.raises(TypeError) as excinfo:
f(matrix_with_wrong_shape)
assert "incompatible function arguments" in str(excinfo.value)
def test_mutator_descriptors():
zr = np.arange(30, dtype='float32').reshape(5, 6) # row-major
zc = zr.reshape(6, 5).transpose() # column-major
m.fixed_mutator_r(zr)
m.fixed_mutator_c(zc)
m.fixed_mutator_a(zr)
m.fixed_mutator_a(zc)
with pytest.raises(TypeError) as excinfo:
m.fixed_mutator_r(zc)
assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.c_contiguous]) -> None'
in str(excinfo.value))
with pytest.raises(TypeError) as excinfo:
m.fixed_mutator_c(zr)
assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.f_contiguous]) -> None'
in str(excinfo.value))
with pytest.raises(TypeError) as excinfo:
m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype='float32'))
assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable]) -> None'
in str(excinfo.value))
zr.flags.writeable = False
with pytest.raises(TypeError):
m.fixed_mutator_r(zr)
with pytest.raises(TypeError):
m.fixed_mutator_a(zr)
def test_cpp_casting():
assert m.cpp_copy(m.fixed_r()) == 22.
assert m.cpp_copy(m.fixed_c()) == 22.
z = np.array([[5., 6], [7, 8]])
assert m.cpp_copy(z) == 7.
assert m.cpp_copy(m.get_cm_ref()) == 21.
assert m.cpp_copy(m.get_rm_ref()) == 21.
assert m.cpp_ref_c(m.get_cm_ref()) == 21.
assert m.cpp_ref_r(m.get_rm_ref()) == 21.
with pytest.raises(RuntimeError) as excinfo:
# Can't reference m.fixed_c: it contains floats, m.cpp_ref_any wants doubles
m.cpp_ref_any(m.fixed_c())
assert 'Unable to cast Python instance' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
# Can't reference m.fixed_r: it contains floats, m.cpp_ref_any wants doubles
m.cpp_ref_any(m.fixed_r())
assert 'Unable to cast Python instance' in str(excinfo.value)
assert m.cpp_ref_any(m.ReturnTester.create()) == 1.
assert m.cpp_ref_any(m.get_cm_ref()) == 21.
assert m.cpp_ref_any(m.get_cm_ref()) == 21.
def test_pass_readonly_array():
z = np.full((5, 6), 42.0)
z.flags.writeable = False
np.testing.assert_array_equal(z, m.fixed_copy_r(z))
np.testing.assert_array_equal(m.fixed_r_const(), m.fixed_r())
assert not m.fixed_r_const().flags.writeable
np.testing.assert_array_equal(m.fixed_copy_r(m.fixed_r_const()), m.fixed_r_const())
def test_nonunit_stride_from_python():
counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))
second_row = counting_mat[1, :]
second_col = counting_mat[:, 1]
np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)
np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)
np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)
counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
for slice_idx, ref_mat in enumerate(slices):
np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
# Mutator:
m.double_threer(second_row)
m.double_threec(second_col)
np.testing.assert_array_equal(counting_mat, [[0., 2, 2], [6, 16, 10], [6, 14, 8]])
def test_negative_stride_from_python(msg):
"""Eigen doesn't support (as of yet) negative strides. When a function takes an Eigen matrix by
copy or const reference, we can pass a numpy array that has negative strides. Otherwise, an
exception will be thrown as Eigen will not be able to map the numpy array."""
counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))
counting_mat = counting_mat[::-1, ::-1]
second_row = counting_mat[1, :]
second_col = counting_mat[:, 1]
np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row)
np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col)
np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col)
np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col)
counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
counting_3d = counting_3d[::-1, ::-1, ::-1]
slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
for slice_idx, ref_mat in enumerate(slices):
np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
# Mutator:
with pytest.raises(TypeError) as excinfo:
m.double_threer(second_row)
assert msg(excinfo.value) == """
double_threer(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[float32[1, 3], flags.writeable]) -> None
Invoked with: """ + repr(np.array([ 5., 4., 3.], dtype='float32')) # noqa: E501 line too long
with pytest.raises(TypeError) as excinfo:
m.double_threec(second_col)
assert msg(excinfo.value) == """
double_threec(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[float32[3, 1], flags.writeable]) -> None
Invoked with: """ + repr(np.array([ 7., 4., 1.], dtype='float32')) # noqa: E501 line too long
def test_nonunit_stride_to_python():
assert np.all(m.diagonal(ref) == ref.diagonal())
assert np.all(m.diagonal_1(ref) == ref.diagonal(1))
for i in range(-5, 7):
assert np.all(m.diagonal_n(ref, i) == ref.diagonal(i)), "m.diagonal_n({})".format(i)
assert np.all(m.block(ref, 2, 1, 3, 3) == ref[2:5, 1:4])
assert np.all(m.block(ref, 1, 4, 4, 2) == ref[1:, 4:])
assert np.all(m.block(ref, 1, 4, 3, 2) == ref[1:4, 4:])
def test_eigen_ref_to_python():
chols = [m.cholesky1, m.cholesky2, m.cholesky3, m.cholesky4]
for i, chol in enumerate(chols, start=1):
mymat = chol(np.array([[1., 2, 4], [2, 13, 23], [4, 23, 77]]))
assert np.all(mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])), "cholesky{}".format(i)
def assign_both(a1, a2, r, c, v):
a1[r, c] = v
a2[r, c] = v
def array_copy_but_one(a, r, c, v):
z = np.array(a, copy=True)
z[r, c] = v
return z
def test_eigen_return_references():
"""Tests various ways of returning references and non-referencing copies"""
master = np.ones((10, 10))
a = m.ReturnTester()
a_get1 = a.get()
assert not a_get1.flags.owndata and a_get1.flags.writeable
assign_both(a_get1, master, 3, 3, 5)
a_get2 = a.get_ptr()
assert not a_get2.flags.owndata and a_get2.flags.writeable
assign_both(a_get1, master, 2, 3, 6)
a_view1 = a.view()
assert not a_view1.flags.owndata and not a_view1.flags.writeable
with pytest.raises(ValueError):
a_view1[2, 3] = 4
a_view2 = a.view_ptr()
assert not a_view2.flags.owndata and not a_view2.flags.writeable
with pytest.raises(ValueError):
a_view2[2, 3] = 4
a_copy1 = a.copy_get()
assert a_copy1.flags.owndata and a_copy1.flags.writeable
np.testing.assert_array_equal(a_copy1, master)
a_copy1[7, 7] = -44 # Shouldn't affect anything else
c1want = array_copy_but_one(master, 7, 7, -44)
a_copy2 = a.copy_view()
assert a_copy2.flags.owndata and a_copy2.flags.writeable
np.testing.assert_array_equal(a_copy2, master)
a_copy2[4, 4] = -22 # Shouldn't affect anything else
c2want = array_copy_but_one(master, 4, 4, -22)
a_ref1 = a.ref()
assert not a_ref1.flags.owndata and a_ref1.flags.writeable
assign_both(a_ref1, master, 1, 1, 15)
a_ref2 = a.ref_const()
assert not a_ref2.flags.owndata and not a_ref2.flags.writeable
with pytest.raises(ValueError):
a_ref2[5, 5] = 33
a_ref3 = a.ref_safe()
assert not a_ref3.flags.owndata and a_ref3.flags.writeable
assign_both(a_ref3, master, 0, 7, 99)
a_ref4 = a.ref_const_safe()
assert not a_ref4.flags.owndata and not a_ref4.flags.writeable
with pytest.raises(ValueError):
a_ref4[7, 0] = 987654321
a_copy3 = a.copy_ref()
assert a_copy3.flags.owndata and a_copy3.flags.writeable
np.testing.assert_array_equal(a_copy3, master)
a_copy3[8, 1] = 11
c3want = array_copy_but_one(master, 8, 1, 11)
a_copy4 = a.copy_ref_const()
assert a_copy4.flags.owndata and a_copy4.flags.writeable
np.testing.assert_array_equal(a_copy4, master)
a_copy4[8, 4] = 88
c4want = array_copy_but_one(master, 8, 4, 88)
a_block1 = a.block(3, 3, 2, 2)
assert not a_block1.flags.owndata and a_block1.flags.writeable
a_block1[0, 0] = 55
master[3, 3] = 55
a_block2 = a.block_safe(2, 2, 3, 2)
assert not a_block2.flags.owndata and a_block2.flags.writeable
a_block2[2, 1] = -123
master[4, 3] = -123
a_block3 = a.block_const(6, 7, 4, 3)
assert not a_block3.flags.owndata and not a_block3.flags.writeable
with pytest.raises(ValueError):
a_block3[2, 2] = -44444
a_copy5 = a.copy_block(2, 2, 2, 3)
assert a_copy5.flags.owndata and a_copy5.flags.writeable
np.testing.assert_array_equal(a_copy5, master[2:4, 2:5])
a_copy5[1, 1] = 777
c5want = array_copy_but_one(master[2:4, 2:5], 1, 1, 777)
a_corn1 = a.corners()
assert not a_corn1.flags.owndata and a_corn1.flags.writeable
a_corn1 *= 50
a_corn1[1, 1] = 999
master[0, 0] = 50
master[0, 9] = 50
master[9, 0] = 50
master[9, 9] = 999
a_corn2 = a.corners_const()
assert not a_corn2.flags.owndata and not a_corn2.flags.writeable
with pytest.raises(ValueError):
a_corn2[1, 0] = 51
# All of the changes made all the way along should be visible everywhere
# now (except for the copies, of course)
np.testing.assert_array_equal(a_get1, master)
np.testing.assert_array_equal(a_get2, master)
np.testing.assert_array_equal(a_view1, master)
np.testing.assert_array_equal(a_view2, master)
np.testing.assert_array_equal(a_ref1, master)
np.testing.assert_array_equal(a_ref2, master)
np.testing.assert_array_equal(a_ref3, master)
np.testing.assert_array_equal(a_ref4, master)
np.testing.assert_array_equal(a_block1, master[3:5, 3:5])
np.testing.assert_array_equal(a_block2, master[2:5, 2:4])
np.testing.assert_array_equal(a_block3, master[6:10, 7:10])
np.testing.assert_array_equal(a_corn1, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
np.testing.assert_array_equal(a_corn2, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
np.testing.assert_array_equal(a_copy1, c1want)
np.testing.assert_array_equal(a_copy2, c2want)
np.testing.assert_array_equal(a_copy3, c3want)
np.testing.assert_array_equal(a_copy4, c4want)
np.testing.assert_array_equal(a_copy5, c5want)
def assert_keeps_alive(cl, method, *args):
cstats = ConstructorStats.get(cl)
start_with = cstats.alive()
a = cl()
assert cstats.alive() == start_with + 1
z = method(a, *args)
assert cstats.alive() == start_with + 1
del a
# Here's the keep alive in action:
assert cstats.alive() == start_with + 1
del z
# Keep alive should have expired:
assert cstats.alive() == start_with
def test_eigen_keepalive():
a = m.ReturnTester()
cstats = ConstructorStats.get(m.ReturnTester)
assert cstats.alive() == 1
unsafe = [a.ref(), a.ref_const(), a.block(1, 2, 3, 4)]
copies = [a.copy_get(), a.copy_view(), a.copy_ref(), a.copy_ref_const(),
a.copy_block(4, 3, 2, 1)]
del a
assert cstats.alive() == 0
del unsafe
del copies
for meth in [m.ReturnTester.get, m.ReturnTester.get_ptr, m.ReturnTester.view,
m.ReturnTester.view_ptr, m.ReturnTester.ref_safe, m.ReturnTester.ref_const_safe,
m.ReturnTester.corners, m.ReturnTester.corners_const]:
assert_keeps_alive(m.ReturnTester, meth)
for meth in [m.ReturnTester.block_safe, m.ReturnTester.block_const]:
assert_keeps_alive(m.ReturnTester, meth, 4, 3, 2, 1)
def test_eigen_ref_mutators():
"""Tests Eigen's ability to mutate numpy values"""
orig = np.array([[1., 2, 3], [4, 5, 6], [7, 8, 9]])
zr = np.array(orig)
zc = np.array(orig, order='F')
m.add_rm(zr, 1, 0, 100)
assert np.all(zr == np.array([[1., 2, 3], [104, 5, 6], [7, 8, 9]]))
m.add_cm(zc, 1, 0, 200)
assert np.all(zc == np.array([[1., 2, 3], [204, 5, 6], [7, 8, 9]]))
m.add_any(zr, 1, 0, 20)
assert np.all(zr == np.array([[1., 2, 3], [124, 5, 6], [7, 8, 9]]))
m.add_any(zc, 1, 0, 10)
assert np.all(zc == np.array([[1., 2, 3], [214, 5, 6], [7, 8, 9]]))
# Can't reference a col-major array with a row-major Ref, and vice versa:
with pytest.raises(TypeError):
m.add_rm(zc, 1, 0, 1)
with pytest.raises(TypeError):
m.add_cm(zr, 1, 0, 1)
# Overloads:
m.add1(zr, 1, 0, -100)
m.add2(zr, 1, 0, -20)
assert np.all(zr == orig)
m.add1(zc, 1, 0, -200)
m.add2(zc, 1, 0, -10)
assert np.all(zc == orig)
# a non-contiguous slice (this won't work on either the row- or
# column-contiguous refs, but should work for the any)
cornersr = zr[0::2, 0::2]
cornersc = zc[0::2, 0::2]
assert np.all(cornersr == np.array([[1., 3], [7, 9]]))
assert np.all(cornersc == np.array([[1., 3], [7, 9]]))
with pytest.raises(TypeError):
m.add_rm(cornersr, 0, 1, 25)
with pytest.raises(TypeError):
m.add_cm(cornersr, 0, 1, 25)
with pytest.raises(TypeError):
m.add_rm(cornersc, 0, 1, 25)
with pytest.raises(TypeError):
m.add_cm(cornersc, 0, 1, 25)
m.add_any(cornersr, 0, 1, 25)
m.add_any(cornersc, 0, 1, 44)
assert np.all(zr == np.array([[1., 2, 28], [4, 5, 6], [7, 8, 9]]))
assert np.all(zc == np.array([[1., 2, 47], [4, 5, 6], [7, 8, 9]]))
# You shouldn't be allowed to pass a non-writeable array to a mutating Eigen method:
zro = zr[0:4, 0:4]
zro.flags.writeable = False
with pytest.raises(TypeError):
m.add_rm(zro, 0, 0, 0)
with pytest.raises(TypeError):
m.add_any(zro, 0, 0, 0)
with pytest.raises(TypeError):
m.add1(zro, 0, 0, 0)
with pytest.raises(TypeError):
m.add2(zro, 0, 0, 0)
# integer array shouldn't be passable to a double-matrix-accepting mutating func:
zi = np.array([[1, 2], [3, 4]])
with pytest.raises(TypeError):
m.add_rm(zi)
def test_numpy_ref_mutators():
"""Tests numpy mutating Eigen matrices (for returned Eigen::Ref<...>s)"""
m.reset_refs() # In case another test already changed it
zc = m.get_cm_ref()
zcro = m.get_cm_const_ref()
zr = m.get_rm_ref()
zrro = m.get_rm_const_ref()
assert [zc[1, 2], zcro[1, 2], zr[1, 2], zrro[1, 2]] == [23] * 4
assert not zc.flags.owndata and zc.flags.writeable
assert not zr.flags.owndata and zr.flags.writeable
assert not zcro.flags.owndata and not zcro.flags.writeable
assert not zrro.flags.owndata and not zrro.flags.writeable
zc[1, 2] = 99
expect = np.array([[11., 12, 13], [21, 22, 99], [31, 32, 33]])
# We should have just changed zc, of course, but also zcro and the original eigen matrix
assert np.all(zc == expect)
assert np.all(zcro == expect)
assert np.all(m.get_cm_ref() == expect)
zr[1, 2] = 99
assert np.all(zr == expect)
assert np.all(zrro == expect)
assert np.all(m.get_rm_ref() == expect)
# Make sure the readonly ones are numpy-readonly:
with pytest.raises(ValueError):
zcro[1, 2] = 6
with pytest.raises(ValueError):
zrro[1, 2] = 6
# We should be able to explicitly copy like this (and since we're copying,
# the const should drop away)
y1 = np.array(m.get_cm_const_ref())
assert y1.flags.owndata and y1.flags.writeable
# We should get copies of the eigen data, which was modified above:
assert y1[1, 2] == 99
y1[1, 2] += 12
assert y1[1, 2] == 111
assert zc[1, 2] == 99 # Make sure we aren't referencing the original
def test_both_ref_mutators():
"""Tests a complex chain of nested eigen/numpy references"""
m.reset_refs() # In case another test already changed it
z = m.get_cm_ref() # numpy -> eigen
z[0, 2] -= 3
z2 = m.incr_matrix(z, 1) # numpy -> eigen -> numpy -> eigen
z2[1, 1] += 6
z3 = m.incr_matrix(z, 2) # (numpy -> eigen)^3
z3[2, 2] += -5
z4 = m.incr_matrix(z, 3) # (numpy -> eigen)^4
z4[1, 1] -= 1
z5 = m.incr_matrix(z, 4) # (numpy -> eigen)^5
z5[0, 0] = 0
assert np.all(z == z2)
assert np.all(z == z3)
assert np.all(z == z4)
assert np.all(z == z5)
expect = np.array([[0., 22, 20], [31, 37, 33], [41, 42, 38]])
assert np.all(z == expect)
y = np.array(range(100), dtype='float64').reshape(10, 10)
y2 = m.incr_matrix_any(y, 10) # np -> eigen -> np
y3 = m.incr_matrix_any(y2[0::2, 0::2], -33) # np -> eigen -> np slice -> np -> eigen -> np
y4 = m.even_rows(y3) # numpy -> eigen slice -> (... y3)
y5 = m.even_cols(y4) # numpy -> eigen slice -> (... y4)
y6 = m.incr_matrix_any(y5, 1000) # numpy -> eigen -> (... y5)
# Apply same mutations using just numpy:
yexpect = np.array(range(100), dtype='float64').reshape(10, 10)
yexpect += 10
yexpect[0::2, 0::2] -= 33
yexpect[0::4, 0::4] += 1000
assert np.all(y6 == yexpect[0::4, 0::4])
assert np.all(y5 == yexpect[0::4, 0::4])
assert np.all(y4 == yexpect[0::4, 0::2])
assert np.all(y3 == yexpect[0::2, 0::2])
assert np.all(y2 == yexpect)
assert np.all(y == yexpect)
def test_nocopy_wrapper():
# get_elem requires a column-contiguous matrix reference, but should be
# callable with other types of matrix (via copying):
int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order='F')
dbl_matrix_colmajor = np.array(int_matrix_colmajor, dtype='double', order='F', copy=True)
int_matrix_rowmajor = np.array(int_matrix_colmajor, order='C', copy=True)
dbl_matrix_rowmajor = np.array(int_matrix_rowmajor, dtype='double', order='C', copy=True)
# All should be callable via get_elem:
assert m.get_elem(int_matrix_colmajor) == 8
assert m.get_elem(dbl_matrix_colmajor) == 8
assert m.get_elem(int_matrix_rowmajor) == 8
assert m.get_elem(dbl_matrix_rowmajor) == 8
# All but the second should fail with m.get_elem_nocopy:
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(int_matrix_colmajor)
assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.f_contiguous' in str(excinfo.value))
assert m.get_elem_nocopy(dbl_matrix_colmajor) == 8
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(int_matrix_rowmajor)
assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.f_contiguous' in str(excinfo.value))
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(dbl_matrix_rowmajor)
assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.f_contiguous' in str(excinfo.value))
# For the row-major test, we take a long matrix in row-major, so only the third is allowed:
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(int_matrix_colmajor)
assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.c_contiguous' in str(excinfo.value))
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(dbl_matrix_colmajor)
assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.c_contiguous' in str(excinfo.value))
assert m.get_elem_rm_nocopy(int_matrix_rowmajor) == 8
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(dbl_matrix_rowmajor)
assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
', flags.c_contiguous' in str(excinfo.value))
def test_eigen_ref_life_support():
"""Ensure the lifetime of temporary arrays created by the `Ref` caster
The `Ref` caster sometimes creates a copy which needs to stay alive. This needs to
happen both for direct casts (just the array) and for indirect casts (e.g. a list of arrays).
"""
a = np.full(shape=10, fill_value=8, dtype=np.int8)
assert m.get_elem_direct(a) == 8
list_of_a = [a]
assert m.get_elem_indirect(list_of_a) == 8
def test_special_matrix_objects():
assert np.all(m.incr_diag(7) == np.diag([1., 2, 3, 4, 5, 6, 7]))
asymm = np.array([[ 1., 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]])
symm_lower = np.array(asymm)
symm_upper = np.array(asymm)
for i in range(4):
for j in range(i + 1, 4):
symm_lower[i, j] = symm_lower[j, i]
symm_upper[j, i] = symm_upper[i, j]
assert np.all(m.symmetric_lower(asymm) == symm_lower)
assert np.all(m.symmetric_upper(asymm) == symm_upper)
def test_dense_signature(doc):
assert doc(m.double_col) == """
double_col(arg0: numpy.ndarray[float32[m, 1]]) -> numpy.ndarray[float32[m, 1]]
"""
assert doc(m.double_row) == """
double_row(arg0: numpy.ndarray[float32[1, n]]) -> numpy.ndarray[float32[1, n]]
"""
assert doc(m.double_complex) == """
double_complex(arg0: numpy.ndarray[complex64[m, 1]]) -> numpy.ndarray[complex64[m, 1]]
"""
assert doc(m.double_mat_rm) == """
double_mat_rm(arg0: numpy.ndarray[float32[m, n]]) -> numpy.ndarray[float32[m, n]]
"""
def test_named_arguments():
a = np.array([[1.0, 2], [3, 4], [5, 6]])
b = np.ones((2, 1))
assert np.all(m.matrix_multiply(a, b) == np.array([[3.], [7], [11]]))
assert np.all(m.matrix_multiply(A=a, B=b) == np.array([[3.], [7], [11]]))
assert np.all(m.matrix_multiply(B=b, A=a) == np.array([[3.], [7], [11]]))
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(b, a)
assert str(excinfo.value) == 'Nonconformable matrices!'
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(A=b, B=a)
assert str(excinfo.value) == 'Nonconformable matrices!'
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(B=a, A=b)
assert str(excinfo.value) == 'Nonconformable matrices!'
@pytest.requires_eigen_and_scipy
def test_sparse():
assert_sparse_equal_ref(m.sparse_r())
assert_sparse_equal_ref(m.sparse_c())
assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_r()))
assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_c()))
assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_c()))
assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_r()))
@pytest.requires_eigen_and_scipy
def test_sparse_signature(doc):
assert doc(m.sparse_copy_r) == """
sparse_copy_r(arg0: scipy.sparse.csr_matrix[float32]) -> scipy.sparse.csr_matrix[float32]
""" # noqa: E501 line too long
assert doc(m.sparse_copy_c) == """
sparse_copy_c(arg0: scipy.sparse.csc_matrix[float32]) -> scipy.sparse.csc_matrix[float32]
""" # noqa: E501 line too long
def test_issue738():
"""Ignore strides on a length-1 dimension (even if they would be incompatible length > 1)"""
assert np.all(m.iss738_f1(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))
assert np.all(m.iss738_f1(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))
assert np.all(m.iss738_f2(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))
assert np.all(m.iss738_f2(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))
def test_issue1105():
"""Issue 1105: 1xN or Nx1 input arrays weren't accepted for eigen
compile-time row vectors or column vector"""
assert m.iss1105_row(np.ones((1, 7)))
assert m.iss1105_col(np.ones((7, 1)))
# These should still fail (incompatible dimensions):
with pytest.raises(TypeError) as excinfo:
m.iss1105_row(np.ones((7, 1)))
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.iss1105_col(np.ones((1, 7)))
assert "incompatible function arguments" in str(excinfo.value)
def test_custom_operator_new():
"""Using Eigen types as member variables requires a class-specific
operator new with proper alignment"""
o = m.CustomOperatorNew()
np.testing.assert_allclose(o.a, 0.0)
np.testing.assert_allclose(o.b.diagonal(), 1.0)
"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exception message sanitizers: ignore Python 2 vs 3 differences.
"""
import pytest
import textwrap
import difflib
import re
import sys
import contextlib
import platform
import gc
_unicode_marker = re.compile(r'u(\'[^\']*\')')
_long_marker = re.compile(r'([0-9])L')
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')
# test_async.py requires support for async and await
collect_ignore = []
if sys.version_info[:2] < (3, 5):
collect_ignore.append("test_async.py")
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip())
def _split_and_sort(s):
"""For output which does not require specific line order"""
return sorted(_strip_and_dedent(s).splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
class Output(object):
"""Basic output post-processing and comparison"""
def __init__(self, string):
self.string = string
self.explanation = []
def __str__(self):
return self.string
def __eq__(self, other):
# Ignore constructor/destructor output which is prefixed with "###"
a = [line for line in self.string.strip().splitlines() if not line.startswith("###")]
b = _strip_and_dedent(other).splitlines()
if a == b:
return True
else:
self.explanation = _make_explanation(a, b)
return False
class Unordered(Output):
"""Custom comparison for output without strict line ordering"""
def __eq__(self, other):
a = _split_and_sort(self.string)
b = _split_and_sort(other)
if a == b:
return True
else:
self.explanation = _make_explanation(a, b)
return False
class Capture(object):
def __init__(self, capfd):
self.capfd = capfd
self.out = ""
self.err = ""
def __enter__(self):
self.capfd.readouterr()
return self
def __exit__(self, *args):
self.out, self.err = self.capfd.readouterr()
def __eq__(self, other):
a = Output(self.out)
b = other
if a == b:
return True
else:
self.explanation = a.explanation
return False
def __str__(self):
return self.out
def __contains__(self, item):
return item in self.out
@property
def unordered(self):
return Unordered(self.out)
@property
def stderr(self):
return Output(self.err)
@pytest.fixture
def capture(capsys):
"""Extended `capsys` with context manager and custom equality operators"""
return Capture(capsys)
class SanitizedString(object):
def __init__(self, sanitizer):
self.sanitizer = sanitizer
self.string = ""
self.explanation = []
def __call__(self, thing):
self.string = self.sanitizer(thing)
return self
def __eq__(self, other):
a = self.string
b = _strip_and_dedent(other)
if a == b:
return True
else:
self.explanation = _make_explanation(a.splitlines(), b.splitlines())
return False
def _sanitize_general(s):
s = s.strip()
s = s.replace("pybind11_tests.", "m.")
s = s.replace("unicode", "str")
s = _long_marker.sub(r"\1", s)
s = _unicode_marker.sub(r"\1", s)
return s
def _sanitize_docstring(thing):
s = thing.__doc__
s = _sanitize_general(s)
return s
@pytest.fixture
def doc():
"""Sanitize docstrings and add custom failure explanation"""
return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
s = str(thing)
s = _sanitize_general(s)
s = _hexadecimal.sub("0", s)
return s
@pytest.fixture
def msg():
"""Sanitize messages and add custom failure explanation"""
return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
"""Hook to insert custom failure explanation"""
if hasattr(left, 'explanation'):
return left.explanation
@contextlib.contextmanager
def suppress(exception):
"""Suppress the desired exception"""
try:
yield
except exception:
pass
def gc_collect():
''' Run the garbage collector twice (needed when running
reference counting tests with PyPy) '''
gc.collect()
gc.collect()
def pytest_configure():
"""Add import suppression and test requirements to `pytest` namespace"""
try:
import numpy as np
except ImportError:
np = None
try:
import scipy
except ImportError:
scipy = None
try:
from pybind11_tests.eigen import have_eigen
except ImportError:
have_eigen = False
pypy = platform.python_implementation() == "PyPy"
skipif = pytest.mark.skipif
pytest.suppress = suppress
pytest.requires_numpy = skipif(not np, reason="numpy is not installed")
pytest.requires_scipy = skipif(not scipy, reason="scipy is not installed")
pytest.requires_eigen_and_numpy = skipif(not have_eigen or not np,
reason="eigen and/or numpy are not installed")
pytest.requires_eigen_and_scipy = skipif(
not have_eigen or not scipy, reason="eigen and/or scipy are not installed")
pytest.unsupported_on_pypy = skipif(pypy, reason="unsupported on PyPy")
pytest.unsupported_on_py2 = skipif(sys.version_info.major < 3,
reason="unsupported on Python 2.x")
pytest.gc_collect = gc_collect
def _test_import_pybind11():
"""Early diagnostic for test module initialization errors
When there is an error during initialization, the first import will report the
real error while all subsequent imports will report nonsense. This import test
is done early (in the pytest configuration file, before any tests) in order to
avoid the noise of having all tests fail with identical error messages.
Any possible exception is caught here and reported manually *without* the stack
trace. This further reduces noise since the trace would only show pytest internals
which are not useful for debugging pybind11 module issues.
"""
# noinspection PyBroadException
try:
import pybind11_tests # noqa: F401 imported but unused
except Exception as e:
print("Failed to import pybind11_tests from pytest:")
print(" {}: {}".format(type(e).__name__, e))
sys.exit(1)
_test_import_pybind11()
from pybind11_tests import constants_and_functions as m
def test_constants():
assert m.some_constant == 14
def test_function_overloading():
assert m.test_function() == "test_function()"
assert m.test_function(7) == "test_function(7)"
assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)"
assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)"
assert m.test_function() == "test_function()"
assert m.test_function("abcd") == "test_function(char *)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(1, 1.0) == "test_function(int, float)"
assert m.test_function(2.0, 2) == "test_function(float, int)"
def test_bytes():
assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]"
def test_exception_specifiers():
c = m.C()
assert c.m1(2) == 1
assert c.m2(3) == 1
assert c.m3(5) == 2
assert c.m4(7) == 3
assert c.m5(10) == 5
assert c.m6(14) == 8
assert c.m7(20) == 13
assert c.m8(29) == 21
assert m.f1(33) == 34
assert m.f2(53) == 55
assert m.f3(86) == 89
assert m.f4(140) == 144
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_constants_and_functions.py | test_constants_and_functions.py |
import pytest
from pybind11_tests import opaque_types as m
from pybind11_tests import ConstructorStats, UserType
def test_string_list():
lst = m.StringList()
lst.push_back("Element 1")
lst.push_back("Element 2")
assert m.print_opaque_list(lst) == "Opaque list: [Element 1, Element 2]"
assert lst.back() == "Element 2"
for i, k in enumerate(lst, start=1):
assert k == "Element {}".format(i)
lst.pop_back()
assert m.print_opaque_list(lst) == "Opaque list: [Element 1]"
cvp = m.ClassWithSTLVecProperty()
assert m.print_opaque_list(cvp.stringList) == "Opaque list: []"
cvp.stringList = lst
cvp.stringList.push_back("Element 3")
assert m.print_opaque_list(cvp.stringList) == "Opaque list: [Element 1, Element 3]"
def test_pointers(msg):
living_before = ConstructorStats.get(UserType).alive()
assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234
assert m.get_void_ptr_value(UserType()) # Should also work for other C++ types
assert ConstructorStats.get(UserType).alive() == living_before
with pytest.raises(TypeError) as excinfo:
m.get_void_ptr_value([1, 2, 3]) # This should not work
assert msg(excinfo.value) == """
get_void_ptr_value(): incompatible function arguments. The following argument types are supported:
1. (arg0: capsule) -> int
Invoked with: [1, 2, 3]
""" # noqa: E501 line too long
assert m.return_null_str() is None
assert m.get_null_str_value(m.return_null_str()) is not None
ptr = m.return_unique_ptr()
assert "StringList" in repr(ptr)
assert m.print_opaque_list(ptr) == "Opaque list: [some value]"
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_opaque_types.py | test_opaque_types.py |
import asyncio
import pytest
from pybind11_tests import async_module as m
@pytest.fixture
def event_loop():
loop = asyncio.new_event_loop()
yield loop
loop.close()
async def get_await_result(x):
return await x
def test_await(event_loop):
assert 5 == event_loop.run_until_complete(get_await_result(m.SupportsAsync()))
def test_await_missing(event_loop):
with pytest.raises(TypeError):
event_loop.run_until_complete(get_await_result(m.DoesNotSupportAsync()))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_async.py | test_async.py |
import pytest
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
def test_multiple_inheritance_cpp():
mt = m.MIType(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
def test_multiple_inheritance_mix1():
class Base1:
def __init__(self, i):
self.i = i
def foo(self):
return self.i
class MITypePy(Base1, m.Base2):
def __init__(self, i, j):
Base1.__init__(self, i)
m.Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
def test_multiple_inheritance_mix2():
class Base2:
def __init__(self, i):
self.i = i
def bar(self):
return self.i
class MITypePy(m.Base1, Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
def test_multiple_inheritance_python():
class MI1(m.Base1, m.Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class B1(object):
def v(self):
return 1
class MI2(B1, m.Base1, m.Base2):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI3(MI2):
def __init__(self, i, j):
MI2.__init__(self, i, j)
class MI4(MI3, m.Base2):
def __init__(self, i, j):
MI3.__init__(self, i, j)
# This should be ignored (Base2 is already initialized via MI2):
m.Base2.__init__(self, i + 100)
class MI5(m.Base2, B1, m.Base1):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI6(m.Base2, B1):
def __init__(self, i):
m.Base2.__init__(self, i)
B1.__init__(self)
class B2(B1):
def v(self):
return 2
class B3(object):
def v(self):
return 3
class B4(B3, B2):
def v(self):
return 4
class MI7(B4, MI6):
def __init__(self, i):
B4.__init__(self)
MI6.__init__(self, i)
class MI8(MI6, B3):
def __init__(self, i):
MI6.__init__(self, i)
B3.__init__(self)
class MI8b(B3, MI6):
def __init__(self, i):
B3.__init__(self)
MI6.__init__(self, i)
mi1 = MI1(1, 2)
assert mi1.foo() == 1
assert mi1.bar() == 2
mi2 = MI2(3, 4)
assert mi2.v() == 1
assert mi2.foo() == 3
assert mi2.bar() == 4
mi3 = MI3(5, 6)
assert mi3.v() == 1
assert mi3.foo() == 5
assert mi3.bar() == 6
mi4 = MI4(7, 8)
assert mi4.v() == 1
assert mi4.foo() == 7
assert mi4.bar() == 8
mi5 = MI5(10, 11)
assert mi5.v() == 1
assert mi5.foo() == 10
assert mi5.bar() == 11
mi6 = MI6(12)
assert mi6.v() == 1
assert mi6.bar() == 12
mi7 = MI7(13)
assert mi7.v() == 4
assert mi7.bar() == 13
mi8 = MI8(14)
assert mi8.v() == 1
assert mi8.bar() == 14
mi8b = MI8b(15)
assert mi8b.v() == 3
assert mi8b.bar() == 15
def test_multiple_inheritance_python_many_bases():
class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
def __init__(self):
m.BaseN1.__init__(self, 1)
m.BaseN2.__init__(self, 2)
m.BaseN3.__init__(self, 3)
m.BaseN4.__init__(self, 4)
class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
def __init__(self):
m.BaseN5.__init__(self, 5)
m.BaseN6.__init__(self, 6)
m.BaseN7.__init__(self, 7)
m.BaseN8.__init__(self, 8)
class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15,
m.BaseN16):
def __init__(self):
m.BaseN9.__init__(self, 9)
m.BaseN10.__init__(self, 10)
m.BaseN11.__init__(self, 11)
m.BaseN12.__init__(self, 12)
m.BaseN13.__init__(self, 13)
m.BaseN14.__init__(self, 14)
m.BaseN15.__init__(self, 15)
m.BaseN16.__init__(self, 16)
class MIMany19(MIMany14, MIMany58, m.BaseN9):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
m.BaseN9.__init__(self, 9)
class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
MIMany916.__init__(self)
m.BaseN17.__init__(self, 17)
# Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:
a = MIMany14()
for i in range(1, 4):
assert getattr(a, "f" + str(i))() == 2 * i
# Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:
b = MIMany916()
for i in range(9, 16):
assert getattr(b, "f" + str(i))() == 2 * i
# Inherits from 9: requires >= 2 pointers worth of holder flags
c = MIMany19()
for i in range(1, 9):
assert getattr(c, "f" + str(i))() == 2 * i
# Inherits from 17: requires >= 3 pointers worth of holder flags
d = MIMany117()
for i in range(1, 17):
assert getattr(d, "f" + str(i))() == 2 * i
def test_multiple_inheritance_virtbase():
class MITypePy(m.Base12a):
def __init__(self, i, j):
m.Base12a.__init__(self, i, j)
mt = MITypePy(3, 4)
assert mt.bar() == 4
assert m.bar_base2a(mt) == 4
assert m.bar_base2a_sharedptr(mt) == 4
def test_mi_static_properties():
"""Mixing bases with and without static properties should be possible
and the result should be independent of base definition order"""
for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
assert d.vanilla() == "Vanilla"
assert d.static_func1() == "WithStatic1"
assert d.static_func2() == "WithStatic2"
assert d.static_func() == d.__class__.__name__
m.WithStatic1.static_value1 = 1
m.WithStatic2.static_value2 = 2
assert d.static_value1 == 1
assert d.static_value2 == 2
assert d.static_value == 12
d.static_value1 = 0
assert d.static_value1 == 0
d.static_value2 = 0
assert d.static_value2 == 0
d.static_value = 0
assert d.static_value == 0
@pytest.unsupported_on_pypy
def test_mi_dynamic_attributes():
"""Mixing bases with and without dynamic attribute support"""
for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):
d.dynamic = 1
assert d.dynamic == 1
def test_mi_unaligned_base():
"""Returning an offset (non-first MI) base class pointer should recognize the instance"""
n_inst = ConstructorStats.detail_reg_inst()
c = m.I801C()
d = m.I801D()
# + 4 below because we have the two instances, and each instance has offset base I801B2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
b1c = m.i801b1_c(c)
assert b1c is c
b2c = m.i801b2_c(c)
assert b2c is c
b1d = m.i801b1_d(d)
assert b1d is d
b2d = m.i801b2_d(d)
assert b2d is d
assert ConstructorStats.detail_reg_inst() == n_inst + 4 # no extra instances
del c, b1c, b2c
assert ConstructorStats.detail_reg_inst() == n_inst + 2
del d, b1d, b2d
assert ConstructorStats.detail_reg_inst() == n_inst
def test_mi_base_return():
"""Tests returning an offset (non-first MI) base class pointer to a derived instance"""
n_inst = ConstructorStats.detail_reg_inst()
c1 = m.i801c_b1()
assert type(c1) is m.I801C
assert c1.a == 1
assert c1.b == 2
d1 = m.i801d_b1()
assert type(d1) is m.I801D
assert d1.a == 1
assert d1.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
c2 = m.i801c_b2()
assert type(c2) is m.I801C
assert c2.a == 1
assert c2.b == 2
d2 = m.i801d_b2()
assert type(d2) is m.I801D
assert d2.a == 1
assert d2.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 8
del c2
assert ConstructorStats.detail_reg_inst() == n_inst + 6
del c1, d1, d2
assert ConstructorStats.detail_reg_inst() == n_inst
# Returning an unregistered derived type with a registered base; we won't
# pick up the derived type, obviously, but should still work (as an object
# of whatever type was returned).
e1 = m.i801e_c()
assert type(e1) is m.I801C
assert e1.a == 1
assert e1.b == 2
e2 = m.i801e_b2()
assert type(e2) is m.I801B2
assert e2.b == 2
def test_diamond_inheritance():
"""Tests that diamond inheritance works as expected (issue #959)"""
# Issue #959: this shouldn't segfault:
d = m.D()
# Make sure all the various distinct pointers are all recognized as registered instances:
assert d is d.c0()
assert d is d.c1()
assert d is d.b()
assert d is d.c0().b()
assert d is d.c1().b()
assert d is d.c0().c1().b().c0().b()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_multiple_inheritance.py | test_multiple_inheritance.py |
import pytest
from pybind11_tests import pickling as m
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
@pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"])
def test_roundtrip(cls_name):
cls = getattr(m, cls_name)
p = cls("test_value")
p.setExtra1(15)
p.setExtra2(48)
data = pickle.dumps(p, 2) # Must use pickle protocol >= 2
p2 = pickle.loads(data)
assert p2.value() == p.value()
assert p2.extra1() == p.extra1()
assert p2.extra2() == p.extra2()
@pytest.unsupported_on_pypy
@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"])
def test_roundtrip_with_dict(cls_name):
cls = getattr(m, cls_name)
p = cls("test_value")
p.extra = 15
p.dynamic = "Attribute"
data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL)
p2 = pickle.loads(data)
assert p2.value == p.value
assert p2.extra == p.extra
assert p2.dynamic == p.dynamic
def test_enum_pickle():
from pybind11_tests import enums as e
data = pickle.dumps(e.EOne, 2)
assert e.EOne == pickle.loads(data)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_pickling.py | test_pickling.py |
import pytest
from pybind11_tests import local_bindings as m
def test_load_external():
"""Load a `py::module_local` type that's only registered in an external module"""
import pybind11_cross_module_tests as cm
assert m.load_external1(cm.ExternalType1(11)) == 11
assert m.load_external2(cm.ExternalType2(22)) == 22
with pytest.raises(TypeError) as excinfo:
assert m.load_external2(cm.ExternalType1(21)) == 21
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
assert m.load_external1(cm.ExternalType2(12)) == 12
assert "incompatible function arguments" in str(excinfo.value)
def test_local_bindings():
"""Tests that duplicate `py::module_local` class bindings work across modules"""
# Make sure we can load the second module with the conflicting (but local) definition:
import pybind11_cross_module_tests as cm
i1 = m.LocalType(5)
assert i1.get() == 4
assert i1.get3() == 8
i2 = cm.LocalType(10)
assert i2.get() == 11
assert i2.get2() == 12
assert not hasattr(i1, 'get2')
assert not hasattr(i2, 'get3')
# Loading within the local module
assert m.local_value(i1) == 5
assert cm.local_value(i2) == 10
# Cross-module loading works as well (on failure, the type loader looks for
# external module-local converters):
assert m.local_value(i2) == 10
assert cm.local_value(i1) == 5
def test_nonlocal_failure():
"""Tests that attempting to register a non-local type in multiple modules fails"""
import pybind11_cross_module_tests as cm
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal()
assert str(excinfo.value) == 'generic_type: type "NonLocalType" is already registered!'
def test_duplicate_local():
"""Tests expected failure when registering a class twice with py::local in the same module"""
with pytest.raises(RuntimeError) as excinfo:
m.register_local_external()
import pybind11_tests
assert str(excinfo.value) == (
'generic_type: type "LocalExternal" is already registered!'
if hasattr(pybind11_tests, 'class_') else 'test_class not enabled')
def test_stl_bind_local():
import pybind11_cross_module_tests as cm
v1, v2 = m.LocalVec(), cm.LocalVec()
v1.append(m.LocalType(1))
v1.append(m.LocalType(2))
v2.append(cm.LocalType(1))
v2.append(cm.LocalType(2))
# Cross module value loading:
v1.append(cm.LocalType(3))
v2.append(m.LocalType(3))
assert [i.get() for i in v1] == [0, 1, 2]
assert [i.get() for i in v2] == [2, 3, 4]
v3, v4 = m.NonLocalVec(), cm.NonLocalVec2()
v3.append(m.NonLocalType(1))
v3.append(m.NonLocalType(2))
v4.append(m.NonLocal2(3))
v4.append(m.NonLocal2(4))
assert [i.get() for i in v3] == [1, 2]
assert [i.get() for i in v4] == [13, 14]
d1, d2 = m.LocalMap(), cm.LocalMap()
d1["a"] = v1[0]
d1["b"] = v1[1]
d2["c"] = v2[0]
d2["d"] = v2[1]
assert {i: d1[i].get() for i in d1} == {'a': 0, 'b': 1}
assert {i: d2[i].get() for i in d2} == {'c': 2, 'd': 3}
def test_stl_bind_global():
import pybind11_cross_module_tests as cm
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_map()
assert str(excinfo.value) == 'generic_type: type "NonLocalMap" is already registered!'
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_vec()
assert str(excinfo.value) == 'generic_type: type "NonLocalVec" is already registered!'
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_map2()
assert str(excinfo.value) == 'generic_type: type "NonLocalMap2" is already registered!'
def test_mixed_local_global():
"""Local types take precedence over globally registered types: a module with a `module_local`
    type can be registered even if the type is already registered globally. Within the module,
casting will go to the local type; outside the module casting goes to the global type."""
import pybind11_cross_module_tests as cm
m.register_mixed_global()
m.register_mixed_local()
a = []
a.append(m.MixedGlobalLocal(1))
a.append(m.MixedLocalGlobal(2))
a.append(m.get_mixed_gl(3))
a.append(m.get_mixed_lg(4))
assert [x.get() for x in a] == [101, 1002, 103, 1004]
cm.register_mixed_global_local()
cm.register_mixed_local_global()
a.append(m.MixedGlobalLocal(5))
a.append(m.MixedLocalGlobal(6))
a.append(cm.MixedGlobalLocal(7))
a.append(cm.MixedLocalGlobal(8))
a.append(m.get_mixed_gl(9))
a.append(m.get_mixed_lg(10))
a.append(cm.get_mixed_gl(11))
a.append(cm.get_mixed_lg(12))
assert [x.get() for x in a] == \
[101, 1002, 103, 1004, 105, 1006, 207, 2008, 109, 1010, 211, 2012]
def test_internal_locals_differ():
"""Makes sure the internal local type map differs across the two modules"""
import pybind11_cross_module_tests as cm
assert m.local_cpp_types_addr() != cm.local_cpp_types_addr()
def test_stl_caster_vs_stl_bind(msg):
"""One module uses a generic vector caster from `<pybind11/stl.h>` while the other
    exports `std::vector<int>` via `py::bind_vector` and `py::module_local`"""
import pybind11_cross_module_tests as cm
v1 = cm.VectorInt([1, 2, 3])
assert m.load_vector_via_caster(v1) == 6
assert cm.load_vector_via_binding(v1) == 6
v2 = [1, 2, 3]
assert m.load_vector_via_caster(v2) == 6
with pytest.raises(TypeError) as excinfo:
cm.load_vector_via_binding(v2) == 6
assert msg(excinfo.value) == """
load_vector_via_binding(): incompatible function arguments. The following argument types are supported:
1. (arg0: pybind11_cross_module_tests.VectorInt) -> int
Invoked with: [1, 2, 3]
""" # noqa: E501 line too long
def test_cross_module_calls():
import pybind11_cross_module_tests as cm
v1 = m.LocalVec()
v1.append(m.LocalType(1))
v2 = cm.LocalVec()
v2.append(cm.LocalType(2))
# Returning the self pointer should get picked up as returning an existing
# instance (even when that instance is of a foreign, non-local type).
assert m.return_self(v1) is v1
assert cm.return_self(v2) is v2
assert m.return_self(v2) is v2
assert cm.return_self(v1) is v1
assert m.LocalVec is not cm.LocalVec
# Returning a copy, on the other hand, always goes to the local type,
# regardless of where the source type came from.
assert type(m.return_copy(v1)) is m.LocalVec
assert type(m.return_copy(v2)) is m.LocalVec
assert type(cm.return_copy(v1)) is cm.LocalVec
assert type(cm.return_copy(v2)) is cm.LocalVec
# Test the example given in the documentation (which also tests inheritance casting):
mycat = m.Cat("Fluffy")
mydog = cm.Dog("Rover")
assert mycat.get_name() == "Fluffy"
assert mydog.name() == "Rover"
assert m.Cat.__base__.__name__ == "Pet"
assert cm.Dog.__base__.__name__ == "Pet"
assert m.Cat.__base__ is not cm.Dog.__base__
assert m.pet_name(mycat) == "Fluffy"
assert m.pet_name(mydog) == "Rover"
assert cm.pet_name(mycat) == "Fluffy"
assert cm.pet_name(mydog) == "Rover"
assert m.MixGL is not cm.MixGL
a = m.MixGL(1)
b = cm.MixGL(2)
assert m.get_gl_value(a) == 11
assert m.get_gl_value(b) == 12
assert cm.get_gl_value(a) == 101
assert cm.get_gl_value(b) == 102
c, d = m.MixGL2(3), cm.MixGL2(4)
with pytest.raises(TypeError) as excinfo:
m.get_gl_value(c)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.get_gl_value(d)
assert "incompatible function arguments" in str(excinfo.value)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_local_bindings.py | test_local_bindings.py |
import pytest
from pybind11_tests import call_policies as m
from pybind11_tests import ConstructorStats
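# Note: the `...KeepAlive` bindings exercised below are expected (per the pybind11
# call-policy documentation) to be declared on the C++ side with a policy such as
# `py::keep_alive<1, 2>()`, which keeps the child argument alive at least as long as
# the parent (`self`); the plain `addChild`/`returnChild` variants carry no such
# policy, so their temporary child is destroyed immediately.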
def test_keep_alive_argument(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.addChild(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == """
Allocating child.
Releasing child.
"""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == "Allocating child."
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
"""
def test_keep_alive_return_value(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnChild()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == """
Allocating child.
Releasing child.
"""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnChildKeepAlive()
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == "Allocating child."
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
"""
# https://bitbucket.org/pypy/pypy/issues/2447
@pytest.unsupported_on_pypy
def test_alive_gc(capture):
n_inst = ConstructorStats.detail_reg_inst()
p = m.ParentGC()
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
"""
def test_alive_gc_derived(capture):
class Derived(m.Parent):
pass
n_inst = ConstructorStats.detail_reg_inst()
p = Derived()
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
"""
def test_alive_gc_multi_derived(capture):
class Derived(m.Parent, m.Child):
def __init__(self):
m.Parent.__init__(self)
m.Child.__init__(self)
n_inst = ConstructorStats.detail_reg_inst()
p = Derived()
p.addChildKeepAlive(m.Child())
# +3 rather than +2 because Derived corresponds to two registered instances
assert ConstructorStats.detail_reg_inst() == n_inst + 3
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
Releasing child.
"""
def test_return_none(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnNullChildKeepAliveChild()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == ""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnNullChildKeepAliveParent()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == ""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
def test_keep_alive_constructor(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == """
Allocating child.
Allocating parent.
"""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == """
Releasing parent.
Releasing child.
"""
def test_call_guard():
assert m.unguarded_call() == "unguarded"
assert m.guarded_call() == "guarded"
assert m.multiple_guards_correct_order() == "guarded & guarded"
assert m.multiple_guards_wrong_order() == "unguarded & guarded"
if hasattr(m, "with_gil"):
assert m.with_gil() == "GIL held"
assert m.without_gil() == "GIL released"
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_call_policies.py | test_call_policies.py |
import pytest
import re
from pybind11_tests import factory_constructors as m
from pybind11_tests.factory_constructors import tag
from pybind11_tests import ConstructorStats
def test_init_factory_basic():
"""Tests py::init_factory() wrapper around various ways of returning the object"""
cstats = [ConstructorStats.get(c) for c in [m.TestFactory1, m.TestFactory2, m.TestFactory3]]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
x1 = m.TestFactory1(tag.unique_ptr, 3)
assert x1.value == "3"
y1 = m.TestFactory1(tag.pointer)
assert y1.value == "(empty)"
z1 = m.TestFactory1("hi!")
assert z1.value == "hi!"
assert ConstructorStats.detail_reg_inst() == n_inst + 3
x2 = m.TestFactory2(tag.move)
assert x2.value == "(empty2)"
y2 = m.TestFactory2(tag.pointer, 7)
assert y2.value == "7"
z2 = m.TestFactory2(tag.unique_ptr, "hi again")
assert z2.value == "hi again"
assert ConstructorStats.detail_reg_inst() == n_inst + 6
x3 = m.TestFactory3(tag.shared_ptr)
assert x3.value == "(empty3)"
y3 = m.TestFactory3(tag.pointer, 42)
assert y3.value == "42"
z3 = m.TestFactory3("bye")
assert z3.value == "bye"
with pytest.raises(TypeError) as excinfo:
m.TestFactory3(tag.null_ptr)
assert str(excinfo.value) == "pybind11::init(): factory function returned nullptr"
assert [i.alive() for i in cstats] == [3, 3, 3]
assert ConstructorStats.detail_reg_inst() == n_inst + 9
del x1, y2, y3, z3
assert [i.alive() for i in cstats] == [2, 2, 1]
assert ConstructorStats.detail_reg_inst() == n_inst + 5
del x2, x3, y1, z1, z2
assert [i.alive() for i in cstats] == [0, 0, 0]
assert ConstructorStats.detail_reg_inst() == n_inst
assert [i.values() for i in cstats] == [
["3", "hi!"],
["7", "hi again"],
["42", "bye"]
]
assert [i.default_constructions for i in cstats] == [1, 1, 1]
def test_init_factory_signature(msg):
with pytest.raises(TypeError) as excinfo:
m.TestFactory1("invalid", "constructor", "arguments")
assert msg(excinfo.value) == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int)
2. m.factory_constructors.TestFactory1(arg0: str)
3. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.pointer_tag)
4. m.factory_constructors.TestFactory1(arg0: handle, arg1: int, arg2: handle)
Invoked with: 'invalid', 'constructor', 'arguments'
""" # noqa: E501 line too long
assert msg(m.TestFactory1.__init__.__doc__) == """
__init__(*args, **kwargs)
Overloaded function.
1. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int) -> None
2. __init__(self: m.factory_constructors.TestFactory1, arg0: str) -> None
3. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.pointer_tag) -> None
4. __init__(self: m.factory_constructors.TestFactory1, arg0: handle, arg1: int, arg2: handle) -> None
""" # noqa: E501 line too long
def test_init_factory_casting():
"""Tests py::init_factory() wrapper with various upcasting and downcasting returns"""
cstats = [ConstructorStats.get(c) for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
# Construction from derived references:
a = m.TestFactory3(tag.pointer, tag.TF4, 4)
assert a.value == "4"
b = m.TestFactory3(tag.shared_ptr, tag.TF4, 5)
assert b.value == "5"
c = m.TestFactory3(tag.pointer, tag.TF5, 6)
assert c.value == "6"
d = m.TestFactory3(tag.shared_ptr, tag.TF5, 7)
assert d.value == "7"
assert ConstructorStats.detail_reg_inst() == n_inst + 4
# Shared a lambda with TF3:
e = m.TestFactory4(tag.pointer, tag.TF4, 8)
assert e.value == "8"
assert ConstructorStats.detail_reg_inst() == n_inst + 5
assert [i.alive() for i in cstats] == [5, 3, 2]
del a
assert [i.alive() for i in cstats] == [4, 2, 2]
assert ConstructorStats.detail_reg_inst() == n_inst + 4
del b, c, e
assert [i.alive() for i in cstats] == [1, 0, 1]
assert ConstructorStats.detail_reg_inst() == n_inst + 1
del d
assert [i.alive() for i in cstats] == [0, 0, 0]
assert ConstructorStats.detail_reg_inst() == n_inst
assert [i.values() for i in cstats] == [
["4", "5", "6", "7", "8"],
["4", "5", "8"],
["6", "7"]
]
def test_init_factory_alias():
"""Tests py::init_factory() wrapper with value conversions and alias types"""
cstats = [m.TestFactory6.get_cstats(), m.TestFactory6.get_alias_cstats()]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
a = m.TestFactory6(tag.base, 1)
assert a.get() == 1
assert not a.has_alias()
b = m.TestFactory6(tag.alias, "hi there")
assert b.get() == 8
assert b.has_alias()
c = m.TestFactory6(tag.alias, 3)
assert c.get() == 3
assert c.has_alias()
d = m.TestFactory6(tag.alias, tag.pointer, 4)
assert d.get() == 4
assert d.has_alias()
e = m.TestFactory6(tag.base, tag.pointer, 5)
assert e.get() == 5
assert not e.has_alias()
f = m.TestFactory6(tag.base, tag.alias, tag.pointer, 6)
assert f.get() == 6
assert f.has_alias()
assert ConstructorStats.detail_reg_inst() == n_inst + 6
assert [i.alive() for i in cstats] == [6, 4]
del a, b, e
assert [i.alive() for i in cstats] == [3, 3]
assert ConstructorStats.detail_reg_inst() == n_inst + 3
del f, c, d
assert [i.alive() for i in cstats] == [0, 0]
assert ConstructorStats.detail_reg_inst() == n_inst
class MyTest(m.TestFactory6):
def __init__(self, *args):
m.TestFactory6.__init__(self, *args)
def get(self):
return -5 + m.TestFactory6.get(self)
# Return Class by value, moved into new alias:
z = MyTest(tag.base, 123)
assert z.get() == 118
assert z.has_alias()
# Return alias by value, moved into new alias:
y = MyTest(tag.alias, "why hello!")
assert y.get() == 5
assert y.has_alias()
# Return Class by pointer, moved into new alias then original destroyed:
x = MyTest(tag.base, tag.pointer, 47)
assert x.get() == 42
assert x.has_alias()
assert ConstructorStats.detail_reg_inst() == n_inst + 3
assert [i.alive() for i in cstats] == [3, 3]
del x, y, z
assert [i.alive() for i in cstats] == [0, 0]
assert ConstructorStats.detail_reg_inst() == n_inst
assert [i.values() for i in cstats] == [
["1", "8", "3", "4", "5", "6", "123", "10", "47"],
["hi there", "3", "4", "6", "move", "123", "why hello!", "move", "47"]
]
def test_init_factory_dual():
"""Tests init factory functions with dual main/alias factory functions"""
from pybind11_tests.factory_constructors import TestFactory7
cstats = [TestFactory7.get_cstats(), TestFactory7.get_alias_cstats()]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
class PythFactory7(TestFactory7):
def get(self):
return 100 + TestFactory7.get(self)
a1 = TestFactory7(1)
a2 = PythFactory7(2)
assert a1.get() == 1
assert a2.get() == 102
assert not a1.has_alias()
assert a2.has_alias()
b1 = TestFactory7(tag.pointer, 3)
b2 = PythFactory7(tag.pointer, 4)
assert b1.get() == 3
assert b2.get() == 104
assert not b1.has_alias()
assert b2.has_alias()
c1 = TestFactory7(tag.mixed, 5)
c2 = PythFactory7(tag.mixed, 6)
assert c1.get() == 5
assert c2.get() == 106
assert not c1.has_alias()
assert c2.has_alias()
d1 = TestFactory7(tag.base, tag.pointer, 7)
d2 = PythFactory7(tag.base, tag.pointer, 8)
assert d1.get() == 7
assert d2.get() == 108
assert not d1.has_alias()
assert d2.has_alias()
# Both return an alias; the second multiplies the value by 10:
e1 = TestFactory7(tag.alias, tag.pointer, 9)
e2 = PythFactory7(tag.alias, tag.pointer, 10)
assert e1.get() == 9
assert e2.get() == 200
assert e1.has_alias()
assert e2.has_alias()
f1 = TestFactory7(tag.shared_ptr, tag.base, 11)
f2 = PythFactory7(tag.shared_ptr, tag.base, 12)
assert f1.get() == 11
assert f2.get() == 112
assert not f1.has_alias()
assert f2.has_alias()
g1 = TestFactory7(tag.shared_ptr, tag.invalid_base, 13)
assert g1.get() == 13
assert not g1.has_alias()
with pytest.raises(TypeError) as excinfo:
PythFactory7(tag.shared_ptr, tag.invalid_base, 14)
assert (str(excinfo.value) ==
"pybind11::init(): construction failed: returned holder-wrapped instance is not an "
"alias instance")
assert [i.alive() for i in cstats] == [13, 7]
assert ConstructorStats.detail_reg_inst() == n_inst + 13
del a1, a2, b1, d1, e1, e2
assert [i.alive() for i in cstats] == [7, 4]
assert ConstructorStats.detail_reg_inst() == n_inst + 7
del b2, c1, c2, d2, f1, f2, g1
assert [i.alive() for i in cstats] == [0, 0]
assert ConstructorStats.detail_reg_inst() == n_inst
assert [i.values() for i in cstats] == [
["1", "2", "3", "4", "5", "6", "7", "8", "9", "100", "11", "12", "13", "14"],
["2", "4", "6", "8", "9", "100", "12"]
]
def test_no_placement_new(capture):
"""Prior to 2.2, `py::init<...>` relied on the type supporting placement
new; this tests a class without placement new support."""
with capture:
a = m.NoPlacementNew(123)
found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
assert found
assert a.i == 123
with capture:
del a
pytest.gc_collect()
assert capture == "operator delete called on " + found.group(1)
with capture:
b = m.NoPlacementNew()
found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
assert found
assert b.i == 100
with capture:
del b
pytest.gc_collect()
assert capture == "operator delete called on " + found.group(1)
def test_multiple_inheritance():
class MITest(m.TestFactory1, m.TestFactory2):
def __init__(self):
m.TestFactory1.__init__(self, tag.unique_ptr, 33)
m.TestFactory2.__init__(self, tag.move)
a = MITest()
assert m.TestFactory1.value.fget(a) == "33"
assert m.TestFactory2.value.fget(a) == "(empty2)"
def create_and_destroy(*args):
a = m.NoisyAlloc(*args)
print("---")
del a
pytest.gc_collect()
def strip_comments(s):
return re.sub(r'\s+#.*', '', s)
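# `strip_comments` drops the trailing "# ..." annotations from the expected-output
# blocks in `test_reallocations` below so they can be compared against the raw
# captured stream; for example, strip_comments("noisy new   # allocation")
# returns "noisy new".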
def test_reallocations(capture, msg):
"""When the constructor is overloaded, previous overloads can require a preallocated value.
This test makes sure that such preallocated values only happen when they might be necessary,
and that they are deallocated properly"""
pytest.gc_collect()
with capture:
create_and_destroy(1)
assert msg(capture) == """
noisy new
noisy placement new
NoisyAlloc(int 1)
---
~NoisyAlloc()
noisy delete
"""
with capture:
create_and_destroy(1.5)
assert msg(capture) == strip_comments("""
noisy new # allocation required to attempt first overload
noisy delete # have to dealloc before considering factory init overload
noisy new # pointer factory calling "new", part 1: allocation
NoisyAlloc(double 1.5) # ... part two, invoking constructor
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
with capture:
create_and_destroy(2, 3)
assert msg(capture) == strip_comments("""
noisy new # pointer factory calling "new", allocation
NoisyAlloc(int 2) # constructor
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
with capture:
create_and_destroy(2.5, 3)
assert msg(capture) == strip_comments("""
NoisyAlloc(double 2.5) # construction (local func variable: operator_new not called)
noisy new # return-by-value "new" part 1: allocation
~NoisyAlloc() # moved-away local func variable destruction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
with capture:
create_and_destroy(3.5, 4.5)
assert msg(capture) == strip_comments("""
noisy new # preallocation needed before invoking placement-new overload
noisy placement new # Placement new
NoisyAlloc(double 3.5) # construction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
with capture:
create_and_destroy(4, 0.5)
assert msg(capture) == strip_comments("""
noisy new # preallocation needed before invoking placement-new overload
noisy delete # deallocation of preallocated storage
noisy new # Factory pointer allocation
NoisyAlloc(int 4) # factory pointer construction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
with capture:
create_and_destroy(5, "hi")
assert msg(capture) == strip_comments("""
noisy new # preallocation needed before invoking first placement new
noisy delete # delete before considering new-style constructor
noisy new # preallocation for second placement new
noisy placement new # Placement new in the second placement new overload
NoisyAlloc(int 5) # construction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
""")
@pytest.unsupported_on_py2
def test_invalid_self():
"""Tests invocation of the pybind-registered base class with an invalid `self` argument. You
can only actually do this on Python 3: Python 2 raises an exception itself if you try."""
class NotPybindDerived(object):
pass
# Attempts to initialize with an invalid type passed as `self`:
class BrokenTF1(m.TestFactory1):
def __init__(self, bad):
if bad == 1:
a = m.TestFactory2(tag.pointer, 1)
m.TestFactory1.__init__(a, tag.pointer)
elif bad == 2:
a = NotPybindDerived()
m.TestFactory1.__init__(a, tag.pointer)
# Same as above, but for a class with an alias:
class BrokenTF6(m.TestFactory6):
def __init__(self, bad):
if bad == 1:
a = m.TestFactory2(tag.pointer, 1)
m.TestFactory6.__init__(a, tag.base, 1)
elif bad == 2:
a = m.TestFactory2(tag.pointer, 1)
m.TestFactory6.__init__(a, tag.alias, 1)
elif bad == 3:
m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.base, 1)
elif bad == 4:
m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1)
for arg in (1, 2):
with pytest.raises(TypeError) as excinfo:
BrokenTF1(arg)
assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
for arg in (1, 2, 3, 4):
with pytest.raises(TypeError) as excinfo:
BrokenTF6(arg)
assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_factory_constructors.py | test_factory_constructors.py |
from __future__ import division
import pytest
import sys
from pybind11_tests import pytypes as m
from pybind11_tests import debug_enabled
def test_list(capture, doc):
with capture:
lst = m.get_list()
assert lst == ["inserted-0", "overwritten", "inserted-2"]
lst.append("value2")
m.print_list(lst)
assert capture.unordered == """
Entry at position 0: value
list item 0: inserted-0
list item 1: overwritten
list item 2: inserted-2
list item 3: value2
"""
assert doc(m.get_list) == "get_list() -> list"
assert doc(m.print_list) == "print_list(arg0: list) -> None"
def test_set(capture, doc):
s = m.get_set()
assert s == {"key1", "key2", "key3"}
with capture:
s.add("key4")
m.print_set(s)
assert capture.unordered == """
key: key1
key: key2
key: key3
key: key4
"""
assert not m.set_contains(set([]), 42)
assert m.set_contains({42}, 42)
assert m.set_contains({"foo"}, "foo")
assert doc(m.get_list) == "get_list() -> list"
assert doc(m.print_list) == "print_list(arg0: list) -> None"
def test_dict(capture, doc):
d = m.get_dict()
assert d == {"key": "value"}
with capture:
d["key2"] = "value2"
m.print_dict(d)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
assert not m.dict_contains({}, 42)
assert m.dict_contains({42: None}, 42)
assert m.dict_contains({"foo": None}, "foo")
assert doc(m.get_dict) == "get_dict() -> dict"
assert doc(m.print_dict) == "print_dict(arg0: dict) -> None"
assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
def test_str(doc):
assert m.str_from_string().encode().decode() == "baz"
assert m.str_from_bytes().encode().decode() == "boo"
assert doc(m.str_from_bytes) == "str_from_bytes() -> str"
class A(object):
def __str__(self):
return "this is a str"
def __repr__(self):
return "this is a repr"
assert m.str_from_object(A()) == "this is a str"
assert m.repr_from_object(A()) == "this is a repr"
s1, s2 = m.str_format()
assert s1 == "1 + 2 = 3"
assert s1 == s2
def test_bytes(doc):
assert m.bytes_from_string().decode() == "foo"
assert m.bytes_from_str().decode() == "bar"
assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format(
"bytes" if sys.version_info[0] == 3 else "str"
)
def test_capsule(capture):
pytest.gc_collect()
with capture:
a = m.return_capsule_with_destructor()
del a
pytest.gc_collect()
assert capture.unordered == """
creating capsule
destructing capsule
"""
with capture:
a = m.return_capsule_with_destructor_2()
del a
pytest.gc_collect()
assert capture.unordered == """
creating capsule
destructing capsule: 1234
"""
with capture:
a = m.return_capsule_with_name_and_destructor()
del a
pytest.gc_collect()
assert capture.unordered == """
created capsule (1234, 'pointer type description')
destructing capsule (1234, 'pointer type description')
"""
def test_accessors():
class SubTestObject:
attr_obj = 1
attr_char = 2
class TestObject:
basic_attr = 1
begin_end = [1, 2, 3]
d = {"operator[object]": 1, "operator[char *]": 2}
sub = SubTestObject()
def func(self, x, *args):
return self.basic_attr + x + sum(args)
d = m.accessor_api(TestObject())
assert d["basic_attr"] == 1
assert d["begin_end"] == [1, 2, 3]
assert d["operator[object]"] == 1
assert d["operator[char *]"] == 2
assert d["attr(object)"] == 1
assert d["attr(char *)"] == 2
assert d["missing_attr_ptr"] == "raised"
assert d["missing_attr_chain"] == "raised"
assert d["is_none"] is False
assert d["operator()"] == 2
assert d["operator*"] == 7
assert d["implicit_list"] == [1, 2, 3]
assert all(x in TestObject.__dict__ for x in d["implicit_dict"])
assert m.tuple_accessor(tuple()) == (0, 1, 2)
d = m.accessor_assignment()
assert d["get"] == 0
assert d["deferred_get"] == 0
assert d["set"] == 1
assert d["deferred_set"] == 1
assert d["var"] == 99
def test_constructors():
"""C++ default and converting constructors are equivalent to type calls in Python"""
types = [str, bool, int, float, tuple, list, dict, set]
expected = {t.__name__: t() for t in types}
assert m.default_constructors() == expected
data = {
str: 42,
bool: "Not empty",
int: "42",
float: "+1e3",
tuple: range(3),
list: range(3),
dict: [("two", 2), ("one", 1), ("three", 3)],
set: [4, 4, 5, 6, 6, 6],
memoryview: b'abc'
}
inputs = {k.__name__: v for k, v in data.items()}
expected = {k.__name__: k(v) for k, v in data.items()}
assert m.converting_constructors(inputs) == expected
assert m.cast_functions(inputs) == expected
# Converting constructors and cast functions should just reference rather
# than copy when no conversion is needed:
noconv1 = m.converting_constructors(expected)
for k in noconv1:
assert noconv1[k] is expected[k]
noconv2 = m.cast_functions(expected)
for k in noconv2:
assert noconv2[k] is expected[k]
def test_implicit_casting():
"""Tests implicit casting when assigning or appending to dicts and lists."""
z = m.get_implicit_casting()
assert z['d'] == {
'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
}
assert z['l'] == [3, 6, 9, 12, 15]
def test_print(capture):
with capture:
m.print_function()
assert capture == """
Hello, World!
1 2.0 three True -- multiple args
*args-and-a-custom-separator
no new line here -- next print
flush
py::print + str.format = this
"""
assert capture.stderr == "this goes to stderr"
with pytest.raises(RuntimeError) as excinfo:
m.print_failure()
assert str(excinfo.value) == "make_tuple(): unable to convert " + (
"argument of type 'UnregisteredType' to Python object"
if debug_enabled else
"arguments to Python object (compile in debug mode for details)"
)
def test_hash():
class Hashable(object):
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
class Unhashable(object):
__hash__ = None
assert m.hash_function(Hashable(42)) == 42
with pytest.raises(TypeError):
m.hash_function(Unhashable())
def test_number_protocol():
for a, b in [(1, 1), (3, 5)]:
li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b,
a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b]
assert m.test_number_protocol(a, b) == li
def test_list_slicing():
li = list(range(100))
assert li[::2] == m.test_list_slicing(li)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_pytypes.py | test_pytypes.py |
import pytest
from pybind11_tests import exceptions as m
import pybind11_cross_module_tests as cm
def test_std_exception(msg):
with pytest.raises(RuntimeError) as excinfo:
m.throw_std_exception()
assert msg(excinfo.value) == "This exception was intentionally thrown."
def test_error_already_set(msg):
with pytest.raises(RuntimeError) as excinfo:
m.throw_already_set(False)
assert msg(excinfo.value) == "Unknown internal error occurred"
with pytest.raises(ValueError) as excinfo:
m.throw_already_set(True)
assert msg(excinfo.value) == "foo"
def test_cross_module_exceptions():
with pytest.raises(RuntimeError) as excinfo:
cm.raise_runtime_error()
assert str(excinfo.value) == "My runtime error"
with pytest.raises(ValueError) as excinfo:
cm.raise_value_error()
assert str(excinfo.value) == "My value error"
with pytest.raises(ValueError) as excinfo:
cm.throw_pybind_value_error()
assert str(excinfo.value) == "pybind11 value error"
with pytest.raises(TypeError) as excinfo:
cm.throw_pybind_type_error()
assert str(excinfo.value) == "pybind11 type error"
with pytest.raises(StopIteration) as excinfo:
cm.throw_stop_iteration()
def test_python_call_in_catch():
d = {}
assert m.python_call_in_destructor(d) is True
assert d["good"] is True
def test_exception_matches():
assert m.exception_matches()
assert m.exception_matches_base()
assert m.modulenotfound_exception_matches_base()
def test_custom(msg):
# Can we catch a MyException?
with pytest.raises(m.MyException) as excinfo:
m.throws1()
assert msg(excinfo.value) == "this error should go to a custom type"
# Can we translate to standard Python exceptions?
with pytest.raises(RuntimeError) as excinfo:
m.throws2()
assert msg(excinfo.value) == "this error should go to a standard Python exception"
# Can we handle unknown exceptions?
with pytest.raises(RuntimeError) as excinfo:
m.throws3()
assert msg(excinfo.value) == "Caught an unknown exception!"
# Can we delegate to another handler by rethrowing?
with pytest.raises(m.MyException) as excinfo:
m.throws4()
assert msg(excinfo.value) == "this error is rethrown"
# Can we fall-through to the default handler?
with pytest.raises(RuntimeError) as excinfo:
m.throws_logic_error()
assert msg(excinfo.value) == "this error should fall through to the standard handler"
    # Overflow error translation.
with pytest.raises(OverflowError) as excinfo:
m.throws_overflow_error()
# Can we handle a helper-declared exception?
with pytest.raises(m.MyException5) as excinfo:
m.throws5()
assert msg(excinfo.value) == "this is a helper-defined translated exception"
# Exception subclassing:
with pytest.raises(m.MyException5) as excinfo:
m.throws5_1()
assert msg(excinfo.value) == "MyException5 subclass"
assert isinstance(excinfo.value, m.MyException5_1)
with pytest.raises(m.MyException5_1) as excinfo:
m.throws5_1()
assert msg(excinfo.value) == "MyException5 subclass"
with pytest.raises(m.MyException5) as excinfo:
try:
m.throws5()
except m.MyException5_1:
raise RuntimeError("Exception error: caught child from parent")
assert msg(excinfo.value) == "this is a helper-defined translated exception"
def test_nested_throws(capture):
"""Tests nested (e.g. C++ -> Python -> C++) exception handling"""
def throw_myex():
raise m.MyException("nested error")
def throw_myex5():
raise m.MyException5("nested error 5")
# In the comments below, the exception is caught in the first step, thrown in the last step
# C++ -> Python
with capture:
m.try_catch(m.MyException5, throw_myex5)
assert str(capture).startswith("MyException5: nested error 5")
# Python -> C++ -> Python
with pytest.raises(m.MyException) as excinfo:
m.try_catch(m.MyException5, throw_myex)
assert str(excinfo.value) == "nested error"
def pycatch(exctype, f, *args):
try:
f(*args)
except m.MyException as e:
print(e)
# C++ -> Python -> C++ -> Python
with capture:
m.try_catch(
m.MyException5, pycatch, m.MyException, m.try_catch, m.MyException, throw_myex5)
assert str(capture).startswith("MyException5: nested error 5")
# C++ -> Python -> C++
with capture:
m.try_catch(m.MyException, pycatch, m.MyException5, m.throws4)
assert capture == "this error is rethrown"
# Python -> C++ -> Python -> C++
with pytest.raises(m.MyException5) as excinfo:
m.try_catch(m.MyException, pycatch, m.MyException, m.throws5)
assert str(excinfo.value) == "this is a helper-defined translated exception"
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_exceptions.py | test_exceptions.py |
from pybind11_tests import modules as m
from pybind11_tests.modules import subsubmodule as ms
from pybind11_tests import ConstructorStats
def test_nested_modules():
import pybind11_tests
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.modules.__name__ == "pybind11_tests.modules"
assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule"
assert m.__name__ == "pybind11_tests.modules"
assert ms.__name__ == "pybind11_tests.modules.subsubmodule"
assert ms.submodule_func() == "submodule_func()"
def test_reference_internal():
b = ms.B()
assert str(b.get_a1()) == "A[1]"
assert str(b.a1) == "A[1]"
assert str(b.get_a2()) == "A[2]"
assert str(b.a2) == "A[2]"
b.a1 = ms.A(42)
b.a2 = ms.A(43)
assert str(b.get_a1()) == "A[42]"
assert str(b.a1) == "A[42]"
assert str(b.get_a2()) == "A[43]"
assert str(b.a2) == "A[43]"
astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B)
assert astats.alive() == 2
assert bstats.alive() == 1
del b
assert astats.alive() == 0
assert bstats.alive() == 0
assert astats.values() == ['1', '2', '42', '43']
assert bstats.values() == []
assert astats.default_constructions == 0
assert bstats.default_constructions == 1
assert astats.copy_constructions == 0
assert bstats.copy_constructions == 0
# assert astats.move_constructions >= 0 # Don't invoke any
# assert bstats.move_constructions >= 0 # Don't invoke any
assert astats.copy_assignments == 2
assert bstats.copy_assignments == 0
assert astats.move_assignments == 0
assert bstats.move_assignments == 0
def test_importing():
from pybind11_tests.modules import OD
from collections import OrderedDict
assert OD is OrderedDict
assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
def test_pydoc():
"""Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
import pybind11_tests
import pydoc
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.__doc__ == "pybind11 test module"
assert pydoc.text.docmodule(pybind11_tests)
def test_duplicate_registration():
"""Registering two things with the same name"""
assert m.duplicate_registration() == []
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_modules.py | test_modules.py |
import os
from pybind11_tests import eval_ as m
def test_evals(capture):
with capture:
assert m.test_eval_statements()
assert capture == "Hello World!"
assert m.test_eval()
assert m.test_eval_single_statement()
filename = os.path.join(os.path.dirname(__file__), "test_eval_call.py")
assert m.test_eval_file(filename)
assert m.test_eval_failure()
assert m.test_eval_file_failure()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_eval.py | test_eval.py |
import pytest
from pybind11_tests import kwargs_and_defaults as m
def test_function_signatures(doc):
assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str"
assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str"
assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str"
assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None"
assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str"
assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
assert doc(m.args_function) == "args_function(*args) -> tuple"
assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
assert doc(m.KWClass.foo0) == \
"foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
assert doc(m.KWClass.foo1) == \
"foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
def test_named_arguments(msg):
assert m.kw_func0(5, 10) == "x=5, y=10"
assert m.kw_func1(5, 10) == "x=5, y=10"
assert m.kw_func1(5, y=10) == "x=5, y=10"
assert m.kw_func1(y=10, x=5) == "x=5, y=10"
assert m.kw_func2() == "x=100, y=200"
assert m.kw_func2(5) == "x=5, y=200"
assert m.kw_func2(x=5) == "x=5, y=200"
assert m.kw_func2(y=10) == "x=100, y=10"
assert m.kw_func2(5, 10) == "x=5, y=10"
assert m.kw_func2(x=5, y=10) == "x=5, y=10"
with pytest.raises(TypeError) as excinfo:
# noinspection PyArgumentList
m.kw_func2(x=5, y=10, z=12)
assert excinfo.match(
r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')
assert m.kw_func4() == "{13 17}"
assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"
assert m.kw_func_udl(x=5, y=10) == "x=5, y=10"
assert m.kw_func_udl_z(x=5) == "x=5, y=0"
def test_arg_and_kwargs():
args = 'arg1_value', 'arg2_value', 3
assert m.args_function(*args) == args
args = 'a1', 'a2'
kwargs = dict(arg3='a3', arg4=4)
assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)
def test_mixed_args_and_kwargs(msg):
mpa = m.mixed_plus_args
mpk = m.mixed_plus_kwargs
mpak = m.mixed_plus_args_kwargs
mpakd = m.mixed_plus_args_kwargs_defaults
assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))
assert mpa(1, 2.5) == (1, 2.5, ())
with pytest.raises(TypeError) as excinfo:
assert mpa(1)
assert msg(excinfo.value) == """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with: 1
""" # noqa: E501 line too long
with pytest.raises(TypeError) as excinfo:
assert mpa()
assert msg(excinfo.value) == """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with:
""" # noqa: E501 line too long
assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})
assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})
assert mpakd() == (1, 3.14159, (), {})
assert mpakd(3) == (3, 3.14159, (), {})
assert mpakd(j=2.71828) == (1, 2.71828, (), {})
assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})
assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})
# Arguments specified both positionally and via kwargs should fail:
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, i=1)
assert msg(excinfo.value) == """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1; kwargs: i=1
""" # noqa: E501 line too long
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, 2, j=1)
assert msg(excinfo.value) == """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1, 2; kwargs: j=1
""" # noqa: E501 line too long
def test_args_refcount():
"""Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular
arguments"""
refcount = m.arg_refcount_h
myval = 54321
expected = refcount(myval)
assert m.arg_refcount_h(myval) == expected
assert m.arg_refcount_o(myval) == expected + 1
assert m.arg_refcount_h(myval) == expected
assert refcount(myval) == expected
assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval))
assert refcount(myval) == expected
assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {"a": 1, "b": myval})
assert refcount(myval) == expected
assert m.args_function(-1, myval) == (-1, myval)
assert refcount(myval) == expected
assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval})
assert refcount(myval) == expected
assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \
((7, 8, myval), {"a": 1, "b": myval})
assert refcount(myval) == expected
exp3 = refcount(myval, myval, myval)
assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3)
assert refcount(myval) == expected
# This function takes the first arg as a `py::object` and the rest as a `py::args`. Unlike the
# previous case, when we have both positional and `py::args` we need to construct a new tuple
# for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input
# tuple without having to inc_ref the individual elements, but here we can't, hence the extra
# refs.
assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)
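# Rough arithmetic behind the final expectation above (a sketch, not part of the
# test): all three arguments are the same object, so the newly constructed
# two-element `py::args` tuple references it twice and the separate leading
# `py::object` argument references it once more, giving exp3 + 3 per reported count.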
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_kwargs_and_defaults.py | test_kwargs_and_defaults.py |
from pybind11_tests import iostream as m
import sys
from contextlib import contextmanager
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# Python 3.4
from contextlib import redirect_stdout
except ImportError:
@contextmanager
def redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
try:
# Python 3.5
from contextlib import redirect_stderr
except ImportError:
@contextmanager
def redirect_stderr(target):
original = sys.stderr
sys.stderr = target
yield
sys.stderr = original
def test_captured(capsys):
msg = "I've been redirected to Python, I hope!"
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
assert stdout == ''
assert stderr == msg
def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ''
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_multi_captured(capfd):
stream = StringIO()
with redirect_stdout(stream):
m.captured_output("a")
m.raw_output("b")
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
assert stdout == 'bd'
assert stream.getvalue() == 'ac'
def test_dual(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_iostream.py | test_iostream.py |
import pytest
from pybind11_tests import stl as m
from pybind11_tests import UserType
from pybind11_tests import ConstructorStats
def test_vector(doc):
"""std::vector <-> list"""
lst = m.cast_vector()
assert lst == [1]
lst.append(2)
assert m.load_vector(lst)
assert m.load_vector(tuple(lst))
assert m.cast_bool_vector() == [True, False]
assert m.load_bool_vector([True, False])
assert doc(m.cast_vector) == "cast_vector() -> List[int]"
assert doc(m.load_vector) == "load_vector(arg0: List[int]) -> bool"
    # Test regression caused by #936: pointers to stl containers weren't castable
assert m.cast_ptr_vector() == ["lvalue", "lvalue"]
def test_deque(doc):
"""std::deque <-> list"""
lst = m.cast_deque()
assert lst == [1]
lst.append(2)
assert m.load_deque(lst)
assert m.load_deque(tuple(lst))
def test_array(doc):
"""std::array <-> list"""
lst = m.cast_array()
assert lst == [1, 2]
assert m.load_array(lst)
assert doc(m.cast_array) == "cast_array() -> List[int[2]]"
assert doc(m.load_array) == "load_array(arg0: List[int[2]]) -> bool"
def test_valarray(doc):
"""std::valarray <-> list"""
lst = m.cast_valarray()
assert lst == [1, 4, 9]
assert m.load_valarray(lst)
assert doc(m.cast_valarray) == "cast_valarray() -> List[int]"
assert doc(m.load_valarray) == "load_valarray(arg0: List[int]) -> bool"
def test_map(doc):
"""std::map <-> dict"""
d = m.cast_map()
assert d == {"key": "value"}
assert "key" in d
d["key2"] = "value2"
assert "key2" in d
assert m.load_map(d)
assert doc(m.cast_map) == "cast_map() -> Dict[str, str]"
assert doc(m.load_map) == "load_map(arg0: Dict[str, str]) -> bool"
def test_set(doc):
"""std::set <-> set"""
s = m.cast_set()
assert s == {"key1", "key2"}
s.add("key3")
assert m.load_set(s)
assert doc(m.cast_set) == "cast_set() -> Set[str]"
assert doc(m.load_set) == "load_set(arg0: Set[str]) -> bool"
def test_recursive_casting():
"""Tests that stl casters preserve lvalue/rvalue context for container values"""
assert m.cast_rv_vector() == ["rvalue", "rvalue"]
assert m.cast_lv_vector() == ["lvalue", "lvalue"]
assert m.cast_rv_array() == ["rvalue", "rvalue", "rvalue"]
assert m.cast_lv_array() == ["lvalue", "lvalue"]
assert m.cast_rv_map() == {"a": "rvalue"}
assert m.cast_lv_map() == {"a": "lvalue", "b": "lvalue"}
assert m.cast_rv_nested() == [[[{"b": "rvalue", "c": "rvalue"}], [{"a": "rvalue"}]]]
assert m.cast_lv_nested() == {
"a": [[["lvalue", "lvalue"]], [["lvalue", "lvalue"]]],
"b": [[["lvalue", "lvalue"], ["lvalue", "lvalue"]]]
}
# Issue #853 test case:
z = m.cast_unique_ptr_vector()
assert z[0].value == 7 and z[1].value == 42
def test_move_out_container():
"""Properties use the `reference_internal` policy by default. If the underlying function
returns an rvalue, the policy is automatically changed to `move` to avoid referencing
a temporary. In case the return value is a container of user-defined types, the policy
also needs to be applied to the elements, not just the container."""
c = m.MoveOutContainer()
moved_out_list = c.move_list
assert [x.value for x in moved_out_list] == [0, 1, 2]
@pytest.mark.skipif(not hasattr(m, "has_optional"), reason='no <optional>')
def test_optional():
assert m.double_or_zero(None) == 0
assert m.double_or_zero(42) == 84
pytest.raises(TypeError, m.double_or_zero, 'foo')
assert m.half_or_none(0) is None
assert m.half_or_none(42) == 21
pytest.raises(TypeError, m.half_or_none, 'foo')
assert m.test_nullopt() == 42
assert m.test_nullopt(None) == 42
assert m.test_nullopt(42) == 42
assert m.test_nullopt(43) == 43
assert m.test_no_assign() == 42
assert m.test_no_assign(None) == 42
assert m.test_no_assign(m.NoAssign(43)) == 43
pytest.raises(TypeError, m.test_no_assign, 43)
assert m.nodefer_none_optional(None)
@pytest.mark.skipif(not hasattr(m, "has_exp_optional"), reason='no <experimental/optional>')
def test_exp_optional():
assert m.double_or_zero_exp(None) == 0
assert m.double_or_zero_exp(42) == 84
pytest.raises(TypeError, m.double_or_zero_exp, 'foo')
assert m.half_or_none_exp(0) is None
assert m.half_or_none_exp(42) == 21
pytest.raises(TypeError, m.half_or_none_exp, 'foo')
assert m.test_nullopt_exp() == 42
assert m.test_nullopt_exp(None) == 42
assert m.test_nullopt_exp(42) == 42
assert m.test_nullopt_exp(43) == 43
assert m.test_no_assign_exp() == 42
assert m.test_no_assign_exp(None) == 42
assert m.test_no_assign_exp(m.NoAssign(43)) == 43
pytest.raises(TypeError, m.test_no_assign_exp, 43)
@pytest.mark.skipif(not hasattr(m, "load_variant"), reason='no <variant>')
def test_variant(doc):
assert m.load_variant(1) == "int"
assert m.load_variant("1") == "std::string"
assert m.load_variant(1.0) == "double"
assert m.load_variant(None) == "std::nullptr_t"
assert m.load_variant_2pass(1) == "int"
assert m.load_variant_2pass(1.0) == "double"
assert m.cast_variant() == (5, "Hello")
assert doc(m.load_variant) == "load_variant(arg0: Union[int, str, float, None]) -> str"
def test_vec_of_reference_wrapper():
"""#171: Can't return reference wrappers (or STL structures containing them)"""
assert str(m.return_vec_of_reference_wrapper(UserType(4))) == \
"[UserType(1), UserType(2), UserType(3), UserType(4)]"
def test_stl_pass_by_pointer(msg):
"""Passing nullptr or None to an STL container pointer is not expected to work"""
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer() # default value is `nullptr`
assert msg(excinfo.value) == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with:
""" # noqa: E501 line too long
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer(None)
assert msg(excinfo.value) == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with: None
""" # noqa: E501 line too long
assert m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]
def test_missing_header_message():
"""Trying convert `list` to a `std::vector`, or vice versa, without including
<pybind11/stl.h> should result in a helpful suggestion in the error message"""
import pybind11_cross_module_tests as cm
expected_message = ("Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
"<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
"conversions are optional and require extra headers to be included\n"
"when compiling your pybind11 module.")
with pytest.raises(TypeError) as excinfo:
cm.missing_header_arg([1.0, 2.0, 3.0])
assert expected_message in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
cm.missing_header_return()
assert expected_message in str(excinfo.value)
def test_function_with_string_and_vector_string_arg():
"""Check if a string is NOT implicitly converted to a list, which was the
    behavior before the fix for issue #1258"""
assert m.func_with_string_or_vector_string_arg_overload(('A', 'B', )) == 2
assert m.func_with_string_or_vector_string_arg_overload(['A', 'B']) == 2
assert m.func_with_string_or_vector_string_arg_overload('A') == 3
def test_stl_ownership():
cstats = ConstructorStats.get(m.Placeholder)
assert cstats.alive() == 0
r = m.test_stl_ownership()
assert len(r) == 1
del r
assert cstats.alive() == 0
def test_array_cast_sequence():
assert m.array_cast_sequence((1, 2, 3)) == [1, 2, 3]
def test_issue_1561():
""" check fix for issue #1561 """
bar = m.Issue1561Outer()
bar.list = [m.Issue1561Inner('bar')]
bar.list
assert bar.list[0].data == 'bar'
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_stl.py | test_stl.py |
import multiprocessing
import threading
from pybind11_tests import gil_scoped as m
def _run_in_process(target, *args, **kwargs):
"""Runs target in process and returns its exitcode after 10s (None if still alive)."""
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
# Do not need to wait much, 10s should be more than enough.
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.terminate()
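# Hypothetical usage sketch (not part of the test suite): a target that deadlocks would make
# process.join() time out, so _run_in_process would return None rather than an exit code:
#
#   def _would_hang():
#       while True:
#           pass
#   assert _run_in_process(_would_hang) is None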
def _python_to_cpp_to_python():
"""Calls different C++ functions that come back to Python."""
class ExtendedVirtClass(m.VirtClass):
def virtual_func(self):
pass
def pure_virtual_func(self):
pass
extended = ExtendedVirtClass()
m.test_callback_py_obj(lambda: None)
m.test_callback_std_func(lambda: None)
m.test_callback_virtual_func(extended)
m.test_callback_pure_virtual_func(extended)
def _python_to_cpp_to_python_from_threads(num_threads, parallel=False):
"""Calls different C++ functions that come back to Python, from Python threads."""
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=_python_to_cpp_to_python)
thread.daemon = True
thread.start()
if parallel:
threads.append(thread)
else:
thread.join()
for thread in threads:
thread.join()
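# Note (added for clarity, not in the upstream test): with parallel=True the threads are
# collected and only joined after all of them have been started, so the Python -> C++ ->
# Python round trips overlap; with parallel=False each thread is joined immediately,
# making the calls effectively sequential.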
def test_python_to_cpp_to_python_from_thread():
"""Makes sure there is no GIL deadlock when running in a thread.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0
def test_python_to_cpp_to_python_from_thread_multiple_parallel():
"""Makes sure there is no GIL deadlock when running in a thread multiple times in parallel.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0
def test_python_to_cpp_to_python_from_thread_multiple_sequential():
"""Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
def test_python_to_cpp_to_python_from_process():
"""Makes sure there is no GIL deadlock when using processes.
    This test is included for completeness, but it was never an issue.
"""
assert _run_in_process(_python_to_cpp_to_python) == 0
def test_cross_module_gil():
"""Makes sure that the GIL can be acquired by another module from a GIL-released state."""
m.test_cross_module_gil() # Should not raise a SIGSEGV
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_gil_scoped.py | test_gil_scoped.py |
import pytest
from pybind11_tests import numpy_array as m
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
def test_dtypes():
# See issue #1328.
# - Platform-dependent sizes.
for size_check in m.get_platform_dtype_size_checks():
print(size_check)
assert size_check.size_cpp == size_check.size_numpy, size_check
# - Concrete sizes.
for check in m.get_concrete_dtype_checks():
print(check)
assert check.numpy == check.pybind11, check
if check.numpy.num != check.pybind11.num:
print("NOTE: typenum mismatch for {}: {} != {}".format(
check, check.numpy.num, check.pybind11.num))
@pytest.fixture(scope='function')
def arr():
return np.array([[1, 2, 3], [4, 5, 6]], '=u2')
def test_array_attributes():
a = np.array(0, 'f8')
assert m.ndim(a) == 0
assert all(m.shape(a) == [])
assert all(m.strides(a) == [])
with pytest.raises(IndexError) as excinfo:
m.shape(a, 0)
assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
with pytest.raises(IndexError) as excinfo:
m.strides(a, 0)
assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
assert m.writeable(a)
assert m.size(a) == 1
assert m.itemsize(a) == 8
assert m.nbytes(a) == 8
assert m.owndata(a)
a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()
a.flags.writeable = False
assert m.ndim(a) == 2
assert all(m.shape(a) == [2, 3])
assert m.shape(a, 0) == 2
assert m.shape(a, 1) == 3
assert all(m.strides(a) == [6, 2])
assert m.strides(a, 0) == 6
assert m.strides(a, 1) == 2
with pytest.raises(IndexError) as excinfo:
m.shape(a, 2)
assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
with pytest.raises(IndexError) as excinfo:
m.strides(a, 2)
assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
assert not m.writeable(a)
assert m.size(a) == 6
assert m.itemsize(a) == 2
assert m.nbytes(a) == 12
assert not m.owndata(a)
@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])
def test_index_offset(arr, args, ret):
assert m.index_at(arr, *args) == ret
assert m.index_at_t(arr, *args) == ret
assert m.offset_at(arr, *args) == ret * arr.dtype.itemsize
assert m.offset_at_t(arr, *args) == ret * arr.dtype.itemsize
def test_dim_check_fail(arr):
for func in (m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t,
m.mutate_data, m.mutate_data_t):
with pytest.raises(IndexError) as excinfo:
func(arr, 1, 2, 3)
assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'
@pytest.mark.parametrize('args, ret',
[([], [1, 2, 3, 4, 5, 6]),
([1], [4, 5, 6]),
([0, 1], [2, 3, 4, 5, 6]),
([1, 2], [6])])
def test_data(arr, args, ret):
from sys import byteorder
assert all(m.data_t(arr, *args) == ret)
assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret)
assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0)
@pytest.mark.parametrize('dim', [0, 1, 3])
def test_at_fail(arr, dim):
for func in m.at_t, m.mutate_at_t:
with pytest.raises(IndexError) as excinfo:
func(arr, *([0] * dim))
assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)
def test_at(arr):
assert m.at_t(arr, 0, 2) == 3
assert m.at_t(arr, 1, 0) == 4
assert all(m.mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6])
assert all(m.mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6])
def test_mutate_readonly(arr):
arr.flags.writeable = False
for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)):
with pytest.raises(ValueError) as excinfo:
func(arr, *args)
assert str(excinfo.value) == 'array is not writeable'
def test_mutate_data(arr):
assert all(m.mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12])
assert all(m.mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24])
assert all(m.mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48])
assert all(m.mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96])
assert all(m.mutate_data(arr, 1, 2).ravel() == [4, 16, 24, 64, 80, 192])
assert all(m.mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193])
assert all(m.mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194])
assert all(m.mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195])
assert all(m.mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196])
assert all(m.mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197])
def test_bounds_check(arr):
for func in (m.index_at, m.index_at_t, m.data, m.data_t,
m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t):
with pytest.raises(IndexError) as excinfo:
func(arr, 2, 0)
assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'
with pytest.raises(IndexError) as excinfo:
func(arr, 0, 4)
assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'
def test_make_c_f_array():
assert m.make_c_array().flags.c_contiguous
assert not m.make_c_array().flags.f_contiguous
assert m.make_f_array().flags.f_contiguous
assert not m.make_f_array().flags.c_contiguous
def test_make_empty_shaped_array():
m.make_empty_shaped_array()
# empty shape means numpy scalar, PEP 3118
assert m.scalar_int().ndim == 0
assert m.scalar_int().shape == ()
assert m.scalar_int() == 42
def test_wrap():
def assert_references(a, b, base=None):
from distutils.version import LooseVersion
if base is None:
base = a
assert a is not b
assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]
assert a.shape == b.shape
assert a.strides == b.strides
assert a.flags.c_contiguous == b.flags.c_contiguous
assert a.flags.f_contiguous == b.flags.f_contiguous
assert a.flags.writeable == b.flags.writeable
assert a.flags.aligned == b.flags.aligned
if LooseVersion(np.__version__) >= LooseVersion("1.14.0"):
assert a.flags.writebackifcopy == b.flags.writebackifcopy
else:
assert a.flags.updateifcopy == b.flags.updateifcopy
assert np.all(a == b)
assert not b.flags.owndata
assert b.base is base
if a.flags.writeable and a.ndim == 2:
a[0, 0] = 1234
assert b[0, 0] == 1234
a1 = np.array([1, 2], dtype=np.int16)
assert a1.flags.owndata and a1.base is None
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')
assert a1.flags.owndata and a1.base is None
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')
a1.flags.writeable = False
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.random.random((4, 4, 4))
a2 = m.wrap(a1)
assert_references(a1, a2)
a1t = a1.transpose()
a2 = m.wrap(a1t)
assert_references(a1t, a2, a1)
a1d = a1.diagonal()
a2 = m.wrap(a1d)
assert_references(a1d, a2, a1)
a1m = a1[::-1, ::-1, ::-1]
a2 = m.wrap(a1m)
assert_references(a1m, a2, a1)
def test_numpy_view(capture):
with capture:
ac = m.ArrayClass()
ac_view_1 = ac.numpy_view()
ac_view_2 = ac.numpy_view()
assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))
del ac
pytest.gc_collect()
assert capture == """
ArrayClass()
ArrayClass::numpy_view()
ArrayClass::numpy_view()
"""
ac_view_1[0] = 4
ac_view_1[1] = 3
assert ac_view_2[0] == 4
assert ac_view_2[1] == 3
with capture:
del ac_view_1
del ac_view_2
pytest.gc_collect()
pytest.gc_collect()
assert capture == """
~ArrayClass()
"""
@pytest.unsupported_on_pypy
def test_cast_numpy_int64_to_uint64():
m.function_taking_uint64(123)
m.function_taking_uint64(np.uint64(123))
def test_isinstance():
assert m.isinstance_untyped(np.array([1, 2, 3]), "not an array")
assert m.isinstance_typed(np.array([1.0, 2.0, 3.0]))
def test_constructors():
defaults = m.default_constructors()
for a in defaults.values():
assert a.size == 0
assert defaults["array"].dtype == np.array([]).dtype
assert defaults["array_t<int32>"].dtype == np.int32
assert defaults["array_t<double>"].dtype == np.float64
results = m.converting_constructors([1, 2, 3])
for a in results.values():
np.testing.assert_array_equal(a, [1, 2, 3])
assert results["array"].dtype == np.int_
assert results["array_t<int32>"].dtype == np.int32
assert results["array_t<double>"].dtype == np.float64
def test_overload_resolution(msg):
# Exact overload matches:
assert m.overloaded(np.array([1], dtype='float64')) == 'double'
assert m.overloaded(np.array([1], dtype='float32')) == 'float'
assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short'
assert m.overloaded(np.array([1], dtype='intc')) == 'int'
assert m.overloaded(np.array([1], dtype='longlong')) == 'long long'
assert m.overloaded(np.array([1], dtype='complex')) == 'double complex'
assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex'
# No exact match, should call first convertible version:
assert m.overloaded(np.array([1], dtype='uint8')) == 'double'
with pytest.raises(TypeError) as excinfo:
m.overloaded("not an array")
assert msg(excinfo.value) == """
overloaded(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[float64]) -> str
2. (arg0: numpy.ndarray[float32]) -> str
3. (arg0: numpy.ndarray[int32]) -> str
4. (arg0: numpy.ndarray[uint16]) -> str
5. (arg0: numpy.ndarray[int64]) -> str
6. (arg0: numpy.ndarray[complex128]) -> str
7. (arg0: numpy.ndarray[complex64]) -> str
Invoked with: 'not an array'
"""
assert m.overloaded2(np.array([1], dtype='float64')) == 'double'
assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex'
assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex'
assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
assert m.overloaded3(np.array([1], dtype='float64')) == 'double'
assert m.overloaded3(np.array([1], dtype='intc')) == 'int'
expected_exc = """
overloaded3(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[int32]) -> str
2. (arg0: numpy.ndarray[float64]) -> str
Invoked with: """
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='uintc'))
assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype='uint32'))
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='float32'))
assert msg(excinfo.value) == expected_exc + repr(np.array([1.], dtype='float32'))
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='complex'))
assert msg(excinfo.value) == expected_exc + repr(np.array([1. + 0.j]))
# Exact matches:
assert m.overloaded4(np.array([1], dtype='double')) == 'double'
assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long'
# Non-exact matches requiring conversion. Since float to integer isn't a
    # safe conversion, it should go to the double overload, but short can go to
# either (and so should end up on the first-registered, the long long).
assert m.overloaded4(np.array([1], dtype='float32')) == 'double'
assert m.overloaded4(np.array([1], dtype='short')) == 'long long'
assert m.overloaded5(np.array([1], dtype='double')) == 'double'
assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int'
assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int'
def test_greedy_string_overload():
"""Tests fix for #685 - ndarray shouldn't go to std::string overload"""
assert m.issue685("abc") == "string"
assert m.issue685(np.array([97, 98, 99], dtype='b')) == "array"
assert m.issue685(123) == "other"
def test_array_unchecked_fixed_dims(msg):
z1 = np.array([[1, 2], [3, 4]], dtype='float64')
m.proxy_add2(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
with pytest.raises(ValueError) as excinfo:
m.proxy_add2(np.array([1., 2, 3]), 5.0)
assert msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2"
expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
assert np.all(m.proxy_init3(3.0) == expect_c)
expect_f = np.transpose(expect_c)
assert np.all(m.proxy_init3F(3.0) == expect_f)
assert m.proxy_squared_L2_norm(np.array(range(6))) == 55
assert m.proxy_squared_L2_norm(np.array(range(6), dtype="float64")) == 55
assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1)
def test_array_unchecked_dyn_dims(msg):
z1 = np.array([[1, 2], [3, 4]], dtype='float64')
m.proxy_add2_dyn(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
assert np.all(m.proxy_init3_dyn(3.0) == expect_c)
assert m.proxy_auxiliaries2_dyn(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
assert m.proxy_auxiliaries2_dyn(z1) == m.array_auxiliaries2(z1)
def test_array_failure():
with pytest.raises(ValueError) as excinfo:
m.array_fail_test()
assert str(excinfo.value) == 'cannot create a pybind11::array from a nullptr'
with pytest.raises(ValueError) as excinfo:
m.array_t_fail_test()
assert str(excinfo.value) == 'cannot create a pybind11::array_t from a nullptr'
with pytest.raises(ValueError) as excinfo:
m.array_fail_test_negative_size()
assert str(excinfo.value) == 'negative dimensions are not allowed'
def test_initializer_list():
assert m.array_initializer_list1().shape == (1,)
assert m.array_initializer_list2().shape == (1, 2)
assert m.array_initializer_list3().shape == (1, 2, 3)
assert m.array_initializer_list4().shape == (1, 2, 3, 4)
def test_array_resize(msg):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64')
m.array_reshape2(a)
assert(a.size == 9)
assert(np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    # total size change should succeed with refcheck off
m.array_resize3(a, 4, False)
assert(a.size == 64)
# ... and fail with refcheck on
try:
m.array_resize3(a, 3, True)
except ValueError as e:
assert(str(e).startswith("cannot resize an array"))
# transposed array doesn't own data
b = a.transpose()
try:
m.array_resize3(b, 3, False)
except ValueError as e:
assert(str(e).startswith("cannot resize this array: it does not own its data"))
# ... but reshape should be fine
m.array_reshape2(b)
assert(b.shape == (8, 8))
@pytest.unsupported_on_pypy
def test_array_create_and_resize(msg):
a = m.create_and_resize(2)
assert(a.size == 4)
assert(np.all(a == 42.))
@pytest.unsupported_on_py2
def test_index_using_ellipsis():
a = m.index_using_ellipsis(np.zeros((5, 6, 7)))
assert a.shape == (6,)
@pytest.unsupported_on_pypy
def test_dtype_refcount_leak():
from sys import getrefcount
dtype = np.dtype(np.float_)
a = np.array([1], dtype=dtype)
before = getrefcount(dtype)
m.ndim(a)
after = getrefcount(dtype)
assert after == before
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_numpy_array.py | test_numpy_array.py |
from pybind11_tests import union_ as m
def test_union():
instance = m.TestUnion()
instance.as_uint = 10
assert instance.as_int == 10
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_union.py | test_union.py |
import pytest
from pybind11_tests import methods_and_attributes as m
from pybind11_tests import ConstructorStats
def test_methods_and_attributes():
instance1 = m.ExampleMandA()
instance2 = m.ExampleMandA(32)
instance1.add1(instance2)
instance1.add2(instance2)
instance1.add3(instance2)
instance1.add4(instance2)
instance1.add5(instance2)
instance1.add6(32)
instance1.add7(32)
instance1.add8(32)
instance1.add9(32)
instance1.add10(32)
assert str(instance1) == "ExampleMandA[value=320]"
assert str(instance2) == "ExampleMandA[value=32]"
assert str(instance1.self1()) == "ExampleMandA[value=320]"
assert str(instance1.self2()) == "ExampleMandA[value=320]"
assert str(instance1.self3()) == "ExampleMandA[value=320]"
assert str(instance1.self4()) == "ExampleMandA[value=320]"
assert str(instance1.self5()) == "ExampleMandA[value=320]"
assert instance1.internal1() == 320
assert instance1.internal2() == 320
assert instance1.internal3() == 320
assert instance1.internal4() == 320
assert instance1.internal5() == 320
assert instance1.overloaded() == "()"
assert instance1.overloaded(0) == "(int)"
assert instance1.overloaded(1, 1.0) == "(int, float)"
assert instance1.overloaded(2.0, 2) == "(float, int)"
assert instance1.overloaded(3, 3) == "(int, int)"
assert instance1.overloaded(4., 4.) == "(float, float)"
assert instance1.overloaded_const(-3) == "(int) const"
assert instance1.overloaded_const(5, 5.0) == "(int, float) const"
assert instance1.overloaded_const(6.0, 6) == "(float, int) const"
assert instance1.overloaded_const(7, 7) == "(int, int) const"
assert instance1.overloaded_const(8., 8.) == "(float, float) const"
assert instance1.overloaded_float(1, 1) == "(float, float)"
assert instance1.overloaded_float(1, 1.) == "(float, float)"
assert instance1.overloaded_float(1., 1) == "(float, float)"
assert instance1.overloaded_float(1., 1.) == "(float, float)"
assert instance1.value == 320
instance1.value = 100
assert str(instance1) == "ExampleMandA[value=100]"
cstats = ConstructorStats.get(m.ExampleMandA)
assert cstats.alive() == 2
del instance1, instance2
assert cstats.alive() == 0
assert cstats.values() == ["32"]
assert cstats.default_constructions == 1
assert cstats.copy_constructions == 3
assert cstats.move_constructions >= 1
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_copy_method():
"""Issue #443: calling copied methods fails in Python 3"""
m.ExampleMandA.add2c = m.ExampleMandA.add2
m.ExampleMandA.add2d = m.ExampleMandA.add2b
a = m.ExampleMandA(123)
assert a.value == 123
a.add2(m.ExampleMandA(-100))
assert a.value == 23
a.add2b(m.ExampleMandA(20))
assert a.value == 43
a.add2c(m.ExampleMandA(6))
assert a.value == 49
a.add2d(m.ExampleMandA(-7))
assert a.value == 42
def test_properties():
instance = m.TestProperties()
assert instance.def_readonly == 1
with pytest.raises(AttributeError):
instance.def_readonly = 2
instance.def_readwrite = 2
assert instance.def_readwrite == 2
assert instance.def_property_readonly == 2
with pytest.raises(AttributeError):
instance.def_property_readonly = 3
instance.def_property = 3
assert instance.def_property == 3
with pytest.raises(AttributeError) as excinfo:
dummy = instance.def_property_writeonly # noqa: F841 unused var
assert "unreadable attribute" in str(excinfo.value)
instance.def_property_writeonly = 4
assert instance.def_property_readonly == 4
with pytest.raises(AttributeError) as excinfo:
dummy = instance.def_property_impossible # noqa: F841 unused var
assert "unreadable attribute" in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo:
instance.def_property_impossible = 5
assert "can't set attribute" in str(excinfo.value)
def test_static_properties():
assert m.TestProperties.def_readonly_static == 1
with pytest.raises(AttributeError) as excinfo:
m.TestProperties.def_readonly_static = 2
assert "can't set attribute" in str(excinfo.value)
m.TestProperties.def_readwrite_static = 2
assert m.TestProperties.def_readwrite_static == 2
with pytest.raises(AttributeError) as excinfo:
dummy = m.TestProperties.def_writeonly_static # noqa: F841 unused var
assert "unreadable attribute" in str(excinfo.value)
m.TestProperties.def_writeonly_static = 3
assert m.TestProperties.def_readonly_static == 3
assert m.TestProperties.def_property_readonly_static == 3
with pytest.raises(AttributeError) as excinfo:
m.TestProperties.def_property_readonly_static = 99
assert "can't set attribute" in str(excinfo.value)
m.TestProperties.def_property_static = 4
assert m.TestProperties.def_property_static == 4
with pytest.raises(AttributeError) as excinfo:
dummy = m.TestProperties.def_property_writeonly_static
assert "unreadable attribute" in str(excinfo.value)
m.TestProperties.def_property_writeonly_static = 5
assert m.TestProperties.def_property_static == 5
# Static property read and write via instance
instance = m.TestProperties()
m.TestProperties.def_readwrite_static = 0
assert m.TestProperties.def_readwrite_static == 0
assert instance.def_readwrite_static == 0
instance.def_readwrite_static = 2
assert m.TestProperties.def_readwrite_static == 2
assert instance.def_readwrite_static == 2
with pytest.raises(AttributeError) as excinfo:
dummy = instance.def_property_writeonly_static # noqa: F841 unused var
assert "unreadable attribute" in str(excinfo.value)
instance.def_property_writeonly_static = 4
assert instance.def_property_static == 4
# It should be possible to override properties in derived classes
assert m.TestPropertiesOverride().def_readonly == 99
assert m.TestPropertiesOverride.def_readonly_static == 99
def test_static_cls():
"""Static property getter and setters expect the type object as the their only argument"""
instance = m.TestProperties()
assert m.TestProperties.static_cls is m.TestProperties
assert instance.static_cls is m.TestProperties
def check_self(self):
assert self is m.TestProperties
m.TestProperties.static_cls = check_self
instance.static_cls = check_self
def test_metaclass_override():
"""Overriding pybind11's default metaclass changes the behavior of `static_property`"""
assert type(m.ExampleMandA).__name__ == "pybind11_type"
assert type(m.MetaclassOverride).__name__ == "type"
assert m.MetaclassOverride.readonly == 1
assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property"
# Regular `type` replaces the property instead of calling `__set__()`
m.MetaclassOverride.readonly = 2
assert m.MetaclassOverride.readonly == 2
assert isinstance(m.MetaclassOverride.__dict__["readonly"], int)
def test_no_mixed_overloads():
from pybind11_tests import debug_enabled
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads1()
assert (str(excinfo.value) ==
"overloading a method with both static and instance methods is not supported; " +
("compile in debug mode for more details" if not debug_enabled else
"error while attempting to bind static method ExampleMandA.overload_mixed1"
"(arg0: float) -> str")
)
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads2()
assert (str(excinfo.value) ==
"overloading a method with both static and instance methods is not supported; " +
("compile in debug mode for more details" if not debug_enabled else
"error while attempting to bind instance method ExampleMandA.overload_mixed2"
"(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
" -> str")
)
@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
def test_property_return_value_policies(access):
if not access.startswith("static"):
obj = m.TestPropRVP()
else:
obj = m.TestPropRVP
ref = getattr(obj, access + "_ref")
assert ref.value == 1
ref.value = 2
assert getattr(obj, access + "_ref").value == 2
ref.value = 1 # restore original value for static properties
copy = getattr(obj, access + "_copy")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_copy").value == 1
copy = getattr(obj, access + "_func")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_func").value == 1
def test_property_rvalue_policy():
"""When returning an rvalue, the return value policy is automatically changed from
`reference(_internal)` to `move`. The following would not work otherwise."""
instance = m.TestPropRVP()
o = instance.rvalue
assert o.value == 1
os = m.TestPropRVP.static_rvalue
assert os.value == 1
# https://bitbucket.org/pypy/pypy/issues/2447
@pytest.unsupported_on_pypy
def test_dynamic_attributes():
instance = m.DynamicClass()
assert not hasattr(instance, "foo")
assert "foo" not in dir(instance)
# Dynamically add attribute
instance.foo = 42
assert hasattr(instance, "foo")
assert instance.foo == 42
assert "foo" in dir(instance)
# __dict__ should be accessible and replaceable
assert "foo" in instance.__dict__
instance.__dict__ = {"bar": True}
assert not hasattr(instance, "foo")
assert hasattr(instance, "bar")
with pytest.raises(TypeError) as excinfo:
instance.__dict__ = []
assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'"
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# Derived classes should work as well
class PythonDerivedDynamicClass(m.DynamicClass):
pass
for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass:
derived = cls()
derived.foobar = 100
assert derived.foobar == 100
assert cstats.alive() == 1
del derived
assert cstats.alive() == 0
# https://bitbucket.org/pypy/pypy/issues/2447
@pytest.unsupported_on_pypy
def test_cyclic_gc():
# One object references itself
instance = m.DynamicClass()
instance.circular_reference = instance
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
    # Two objects reference each other
i1 = m.DynamicClass()
i2 = m.DynamicClass()
i1.cycle = i2
i2.cycle = i1
assert cstats.alive() == 2
del i1, i2
assert cstats.alive() == 0
def test_noconvert_args(msg):
a = m.ArgInspector()
assert msg(a.f("hi")) == """
loading ArgInspector1 argument WITH conversion allowed. Argument value = hi
"""
assert msg(a.g("this is a", "this is b")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
13
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42)) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42, "this is d")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d
"""
assert (a.h("arg 1") ==
"loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1")
assert msg(m.arg_inspect_func("A1", "A2")) == """
loading ArgInspector2 argument WITH conversion allowed. Argument value = A1
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2
"""
assert m.floats_preferred(4) == 2.0
assert m.floats_only(4.0) == 2.0
with pytest.raises(TypeError) as excinfo:
m.floats_only(4)
assert msg(excinfo.value) == """
floats_only(): incompatible function arguments. The following argument types are supported:
1. (f: float) -> float
Invoked with: 4
"""
assert m.ints_preferred(4) == 2
assert m.ints_preferred(True) == 0
with pytest.raises(TypeError) as excinfo:
m.ints_preferred(4.0)
assert msg(excinfo.value) == """
ints_preferred(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
""" # noqa: E501 line too long
assert m.ints_only(4) == 2
with pytest.raises(TypeError) as excinfo:
m.ints_only(4.0)
assert msg(excinfo.value) == """
ints_only(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
"""
def test_bad_arg_default(msg):
from pybind11_tests import debug_enabled
with pytest.raises(RuntimeError) as excinfo:
m.bad_arg_def_named()
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'a: UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
if debug_enabled else
"arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
with pytest.raises(RuntimeError) as excinfo:
m.bad_arg_def_unnamed()
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
if debug_enabled else
"arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
def test_accepts_none(msg):
a = m.NoneTester()
assert m.no_none1(a) == 42
assert m.no_none2(a) == 42
assert m.no_none3(a) == 42
assert m.no_none4(a) == 42
assert m.no_none5(a) == 42
assert m.ok_none1(a) == 42
assert m.ok_none2(a) == 42
assert m.ok_none3(a) == 42
assert m.ok_none4(a) == 42
assert m.ok_none5(a) == 42
with pytest.raises(TypeError) as excinfo:
m.no_none1(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none2(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none3(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none4(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none5(None)
assert "incompatible function arguments" in str(excinfo.value)
    # The first one still raises because you can't pass None as an lvalue reference arg:
with pytest.raises(TypeError) as excinfo:
assert m.ok_none1(None) == -1
assert msg(excinfo.value) == """
ok_none1(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.methods_and_attributes.NoneTester) -> int
Invoked with: None
"""
# The rest take the argument as pointer or holder, and accept None:
assert m.ok_none2(None) == -1
assert m.ok_none3(None) == -1
assert m.ok_none4(None) == -1
assert m.ok_none5(None) == -1
def test_str_issue(msg):
"""#283: __str__ called on uninitialized instance when constructor arguments invalid"""
assert str(m.StrIssue(3)) == "StrIssue[3]"
with pytest.raises(TypeError) as excinfo:
str(m.StrIssue("no", "such", "constructor"))
assert msg(excinfo.value) == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.methods_and_attributes.StrIssue(arg0: int)
2. m.methods_and_attributes.StrIssue()
Invoked with: 'no', 'such', 'constructor'
"""
def test_unregistered_base_implementations():
a = m.RegisteredDerived()
a.do_nothing()
assert a.rw_value == 42
assert a.ro_value == 1.25
a.rw_value += 5
assert a.sum() == 48.25
a.increase_value()
assert a.rw_value == 48
assert a.ro_value == 1.5
assert a.sum() == 49.5
assert a.rw_value_prop == 48
a.rw_value_prop += 1
assert a.rw_value_prop == 49
a.increase_value()
assert a.ro_value_prop == 1.75
def test_custom_caster_destruction():
"""Tests that returning a pointer to a type that gets converted with a custom type caster gets
destroyed when the function has py::return_value_policy::take_ownership policy applied."""
cstats = m.destruction_tester_cstats()
# This one *doesn't* have take_ownership: the pointer should be used but not destroyed:
z = m.custom_caster_no_destroy()
assert cstats.alive() == 1 and cstats.default_constructions == 1
assert z
# take_ownership applied: this constructs a new object, casts it, then destroys it:
z = m.custom_caster_destroy()
assert z
assert cstats.default_constructions == 2
# Same, but with a const pointer return (which should *not* inhibit destruction):
z = m.custom_caster_destroy_const()
assert z
assert cstats.default_constructions == 3
# Make sure we still only have the original object (from ..._no_destroy()) alive:
assert cstats.alive() == 1
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_methods_and_attributes.py | test_methods_and_attributes.py |
# This file is called from 'test_eval.py'
if 'call_test2' in locals():
call_test2(y) # noqa: F821 undefined name
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_eval_call.py | test_eval_call.py |
import pytest
from pybind11_tests import class_ as m
from pybind11_tests import UserType, ConstructorStats
def test_repr():
# In Python 3.3+, repr() accesses __qualname__
assert "pybind11_type" in repr(type(UserType))
assert "UserType" in repr(UserType)
def test_instance(msg):
with pytest.raises(TypeError) as excinfo:
m.NoConstructor()
assert msg(excinfo.value) == "m.class_.NoConstructor: No constructor defined!"
instance = m.NoConstructor.new_instance()
cstats = ConstructorStats.get(m.NoConstructor)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
def test_docstrings(doc):
assert doc(UserType) == "A `py::class_` type for testing"
assert UserType.__name__ == "UserType"
assert UserType.__module__ == "pybind11_tests"
assert UserType.get_value.__name__ == "get_value"
assert UserType.get_value.__module__ == "pybind11_tests"
assert doc(UserType.get_value) == """
get_value(self: m.UserType) -> int
Get value using a method
"""
assert doc(UserType.value) == "Get/set value using a property"
assert doc(m.NoConstructor.new_instance) == """
new_instance() -> m.class_.NoConstructor
Return an instance
"""
def test_qualname(doc):
"""Tests that a properly qualified name is set in __qualname__ (even in pre-3.3, where we
backport the attribute) and that generated docstrings properly use it and the module name"""
assert m.NestBase.__qualname__ == "NestBase"
assert m.NestBase.Nested.__qualname__ == "NestBase.Nested"
assert doc(m.NestBase.__init__) == """
__init__(self: m.class_.NestBase) -> None
"""
assert doc(m.NestBase.g) == """
g(self: m.class_.NestBase, arg0: m.class_.NestBase.Nested) -> None
"""
assert doc(m.NestBase.Nested.__init__) == """
__init__(self: m.class_.NestBase.Nested) -> None
"""
assert doc(m.NestBase.Nested.fn) == """
fn(self: m.class_.NestBase.Nested, arg0: int, arg1: m.class_.NestBase, arg2: m.class_.NestBase.Nested) -> None
""" # noqa: E501 line too long
assert doc(m.NestBase.Nested.fa) == """
fa(self: m.class_.NestBase.Nested, a: int, b: m.class_.NestBase, c: m.class_.NestBase.Nested) -> None
""" # noqa: E501 line too long
assert m.NestBase.__module__ == "pybind11_tests.class_"
assert m.NestBase.Nested.__module__ == "pybind11_tests.class_"
def test_inheritance(msg):
roger = m.Rabbit('Rabbit')
assert roger.name() + " is a " + roger.species() == "Rabbit is a parrot"
assert m.pet_name_species(roger) == "Rabbit is a parrot"
polly = m.Pet('Polly', 'parrot')
assert polly.name() + " is a " + polly.species() == "Polly is a parrot"
assert m.pet_name_species(polly) == "Polly is a parrot"
molly = m.Dog('Molly')
assert molly.name() + " is a " + molly.species() == "Molly is a dog"
assert m.pet_name_species(molly) == "Molly is a dog"
fred = m.Hamster('Fred')
assert fred.name() + " is a " + fred.species() == "Fred is a rodent"
assert m.dog_bark(molly) == "Woof!"
with pytest.raises(TypeError) as excinfo:
m.dog_bark(polly)
assert msg(excinfo.value) == """
dog_bark(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.class_.Dog) -> str
Invoked with: <m.class_.Pet object at 0>
"""
with pytest.raises(TypeError) as excinfo:
m.Chimera("lion", "goat")
assert "No constructor defined!" in str(excinfo.value)
def test_automatic_upcasting():
assert type(m.return_class_1()).__name__ == "DerivedClass1"
assert type(m.return_class_2()).__name__ == "DerivedClass2"
assert type(m.return_none()).__name__ == "NoneType"
# Repeat these a few times in a random order to ensure no invalid caching is applied
assert type(m.return_class_n(1)).__name__ == "DerivedClass1"
assert type(m.return_class_n(2)).__name__ == "DerivedClass2"
assert type(m.return_class_n(0)).__name__ == "BaseClass"
assert type(m.return_class_n(2)).__name__ == "DerivedClass2"
assert type(m.return_class_n(2)).__name__ == "DerivedClass2"
assert type(m.return_class_n(0)).__name__ == "BaseClass"
assert type(m.return_class_n(1)).__name__ == "DerivedClass1"
def test_isinstance():
objects = [tuple(), dict(), m.Pet("Polly", "parrot")] + [m.Dog("Molly")] * 4
expected = (True, True, True, True, True, False, False)
assert m.check_instances(objects) == expected
def test_mismatched_holder():
import re
with pytest.raises(RuntimeError) as excinfo:
m.mismatched_holder_1()
assert re.match('generic_type: type ".*MismatchDerived1" does not have a non-default '
'holder type while its base ".*MismatchBase1" does', str(excinfo.value))
with pytest.raises(RuntimeError) as excinfo:
m.mismatched_holder_2()
assert re.match('generic_type: type ".*MismatchDerived2" has a non-default holder type '
'while its base ".*MismatchBase2" does not', str(excinfo.value))
def test_override_static():
"""#511: problem with inheritance + overwritten def_static"""
b = m.MyBase.make()
d1 = m.MyDerived.make2()
d2 = m.MyDerived.make()
assert isinstance(b, m.MyBase)
assert isinstance(d1, m.MyDerived)
assert isinstance(d2, m.MyDerived)
def test_implicit_conversion_life_support():
"""Ensure the lifetime of temporary objects created for implicit conversions"""
assert m.implicitly_convert_argument(UserType(5)) == 5
assert m.implicitly_convert_variable(UserType(5)) == 5
assert "outside a bound function" in m.implicitly_convert_variable_fail(UserType(5))
def test_operator_new_delete(capture):
"""Tests that class-specific operator new/delete functions are invoked"""
class SubAliased(m.AliasedHasOpNewDelSize):
pass
with capture:
a = m.HasOpNewDel()
b = m.HasOpNewDelSize()
d = m.HasOpNewDelBoth()
assert capture == """
A new 8
B new 4
D new 32
"""
sz_alias = str(m.AliasedHasOpNewDelSize.size_alias)
sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias)
with capture:
c = m.AliasedHasOpNewDelSize()
c2 = SubAliased()
assert capture == (
"C new " + sz_noalias + "\n" +
"C new " + sz_alias + "\n"
)
with capture:
del a
pytest.gc_collect()
del b
pytest.gc_collect()
del d
pytest.gc_collect()
assert capture == """
A delete
B delete 4
D delete
"""
with capture:
del c
pytest.gc_collect()
del c2
pytest.gc_collect()
assert capture == (
"C delete " + sz_noalias + "\n" +
"C delete " + sz_alias + "\n"
)
def test_bind_protected_functions():
"""Expose protected member functions to Python using a helper class"""
a = m.ProtectedA()
assert a.foo() == 42
b = m.ProtectedB()
assert b.foo() == 42
class C(m.ProtectedB):
def __init__(self):
m.ProtectedB.__init__(self)
def foo(self):
return 0
c = C()
assert c.foo() == 0
def test_brace_initialization():
""" Tests that simple POD classes can be constructed using C++11 brace initialization """
a = m.BraceInitialization(123, "test")
assert a.field1 == 123
assert a.field2 == "test"
# Tests that a non-simple class doesn't get brace initialization (if the
# class defines an initializer_list constructor, in particular, it would
# win over the expected constructor).
b = m.NoBraceInitialization([123, 456])
assert b.vec == [123, 456]
@pytest.unsupported_on_pypy
def test_class_refcount():
"""Instances must correctly increase/decrease the reference count of their types (#1029)"""
from sys import getrefcount
class PyDog(m.Dog):
pass
for cls in m.Dog, PyDog:
refcount_1 = getrefcount(cls)
molly = [cls("Molly") for _ in range(10)]
refcount_2 = getrefcount(cls)
del molly
pytest.gc_collect()
refcount_3 = getrefcount(cls)
assert refcount_1 == refcount_3
assert refcount_2 > refcount_1
def test_reentrant_implicit_conversion_failure(msg):
# ensure that there is no runaway reentrant implicit conversion (#1035)
with pytest.raises(TypeError) as excinfo:
m.BogusImplicitConversion(0)
assert msg(excinfo.value) == '''
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.class_.BogusImplicitConversion(arg0: m.class_.BogusImplicitConversion)
Invoked with: 0
'''
def test_error_after_conversions():
with pytest.raises(TypeError) as exc_info:
m.test_error_after_conversions("hello")
assert str(exc_info.value).startswith(
"Unable to convert function return value to a Python type!")
def test_aligned():
if hasattr(m, "Aligned"):
p = m.Aligned().ptr()
assert p % 1024 == 0
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_class.py | test_class.py |
import pytest
from pybind11_tests import virtual_functions as m
from pybind11_tests import ConstructorStats
def test_override(capture, msg):
class ExtendedExampleVirt(m.ExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt, self).__init__(state + 1)
self.data = "Hello world"
def run(self, value):
print('ExtendedExampleVirt::run(%i), calling parent..' % value)
return super(ExtendedExampleVirt, self).run(value + 1)
def run_bool(self):
print('ExtendedExampleVirt::run_bool()')
return False
def get_string1(self):
return "override1"
def pure_virtual(self):
print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
class ExtendedExampleVirt2(ExtendedExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt2, self).__init__(state + 1)
def get_string2(self):
return "override2"
ex12 = m.ExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12, 20) == 30
assert capture == """
Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
""" # noqa: E501 line too long
with pytest.raises(RuntimeError) as excinfo:
m.runExampleVirtVirtual(ex12)
assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
ex12p = ExtendedExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12p, 20) == 32
assert capture == """
ExtendedExampleVirt::run(20), calling parent..
Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
""" # noqa: E501 line too long
with capture:
assert m.runExampleVirtBool(ex12p) is False
assert capture == "ExtendedExampleVirt::run_bool()"
with capture:
m.runExampleVirtVirtual(ex12p)
assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"
ex12p2 = ExtendedExampleVirt2(15)
with capture:
assert m.runExampleVirt(ex12p2, 50) == 68
assert capture == """
ExtendedExampleVirt::run(50), calling parent..
Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
""" # noqa: E501 line too long
cstats = ConstructorStats.get(m.ExampleVirt)
assert cstats.alive() == 3
del ex12, ex12p, ex12p2
assert cstats.alive() == 0
assert cstats.values() == ['10', '11', '17']
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 0
def test_alias_delay_initialization1(capture):
"""`A` only initializes its trampoline class when we inherit from it
If we just create and use an A instance directly, the trampoline initialization is
bypassed and we only initialize an A() instead (for performance reasons).
"""
class B(m.A):
def __init__(self):
super(B, self).__init__()
def f(self):
print("In python f()")
# C++ version
with capture:
a = m.A()
m.call_f(a)
del a
pytest.gc_collect()
assert capture == "A.f()"
# Python version
with capture:
b = B()
m.call_f(b)
del b
pytest.gc_collect()
assert capture == """
PyA.PyA()
PyA.f()
In python f()
PyA.~PyA()
"""
def test_alias_delay_initialization2(capture):
"""`A2`, unlike the above, is configured to always initialize the alias
    While the extra initialization and extra class layer carry a small virtual dispatch
    performance penalty, they also allow us to do more things with the trampoline
class such as defining local variables and performing construction/destruction.
"""
class B2(m.A2):
def __init__(self):
super(B2, self).__init__()
def f(self):
print("In python B2.f()")
# No python subclass version
with capture:
a2 = m.A2()
m.call_f(a2)
del a2
pytest.gc_collect()
a3 = m.A2(1)
m.call_f(a3)
del a3
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
"""
# Python subclass version
with capture:
b2 = B2()
m.call_f(b2)
del b2
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
In python B2.f()
PyA2.~PyA2()
"""
# PyPy: Reference count > 1 causes call with noncopyable instance
# to fail in ncv1.print_nc()
@pytest.unsupported_on_pypy
@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
def test_move_support():
class NCVirtExt(m.NCVirt):
def get_noncopyable(self, a, b):
# Constructs and returns a new instance:
nc = m.NonCopyable(a * a, b * b)
return nc
def get_movable(self, a, b):
# Return a referenced copy
self.movable = m.Movable(a, b)
return self.movable
class NCVirtExt2(m.NCVirt):
def get_noncopyable(self, a, b):
# Keep a reference: this is going to throw an exception
self.nc = m.NonCopyable(a, b)
return self.nc
def get_movable(self, a, b):
# Return a new instance without storing it
return m.Movable(a, b)
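    # NCVirtExt returns a fresh NonCopyable from get_noncopyable(), so it can be moved out
    # and print_nc succeeds; NCVirtExt2 keeps a reference to the NonCopyable it returns,
    # which is why its print_nc call below is expected to raise. The Movable case tolerates
    # a kept reference because the value can fall back to a copy.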
ncv1 = NCVirtExt()
assert ncv1.print_nc(2, 3) == "36"
assert ncv1.print_movable(4, 5) == "9"
ncv2 = NCVirtExt2()
assert ncv2.print_movable(7, 7) == "14"
# Don't check the exception message here because it differs under debug/non-debug mode
with pytest.raises(RuntimeError):
ncv2.print_nc(9, 9)
nc_stats = ConstructorStats.get(m.NonCopyable)
mv_stats = ConstructorStats.get(m.Movable)
assert nc_stats.alive() == 1
assert mv_stats.alive() == 1
del ncv1, ncv2
assert nc_stats.alive() == 0
assert mv_stats.alive() == 0
assert nc_stats.values() == ['4', '9', '9', '9']
assert mv_stats.values() == ['4', '5', '7', '7']
assert nc_stats.copy_constructions == 0
assert mv_stats.copy_constructions == 1
assert nc_stats.move_constructions >= 0
assert mv_stats.move_constructions >= 0
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
class PyClass1(m.DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(m.DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return m.dispatch_issue_go(p)
b = PyClass2()
assert m.dispatch_issue_go(b) == "Yay.."
def test_override_ref():
"""#392/397: overriding reference-returning functions"""
o = m.OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_inherited_virtuals():
class AR(m.A_Repeat):
def unlucky_number(self):
return 99
class AT(m.A_Tpl):
def unlucky_number(self):
return 999
obj = AR()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 99
assert obj.say_everything() == "hi 99"
obj = AT()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 999
assert obj.say_everything() == "hi 999"
for obj in [m.B_Repeat(), m.B_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 13
assert obj.lucky_number() == 7.0
assert obj.say_everything() == "B says hi 1 times 13"
for obj in [m.C_Repeat(), m.C_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CR(m.C_Repeat):
def lucky_number(self):
return m.C_Repeat.lucky_number(self) + 1.25
obj = CR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 889.25
assert obj.say_everything() == "B says hi 1 times 4444"
class CT(m.C_Tpl):
pass
obj = CT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CCR(CR):
def lucky_number(self):
return CR.lucky_number(self) * 10
obj = CCR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 8892.5
assert obj.say_everything() == "B says hi 1 times 4444"
class CCT(CT):
def lucky_number(self):
return CT.lucky_number(self) * 1000
obj = CCT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888000.0
assert obj.say_everything() == "B says hi 1 times 4444"
class DR(m.D_Repeat):
def unlucky_number(self):
return 123
def lucky_number(self):
return 42.0
for obj in [m.D_Repeat(), m.D_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
obj = DR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 123
assert obj.lucky_number() == 42.0
assert obj.say_everything() == "B says hi 1 times 123"
class DT(m.D_Tpl):
def say_something(self, times):
return "DT says:" + (' quack' * times)
def unlucky_number(self):
return 1234
def lucky_number(self):
return -4.25
obj = DT()
assert obj.say_something(3) == "DT says: quack quack quack"
assert obj.unlucky_number() == 1234
assert obj.lucky_number() == -4.25
assert obj.say_everything() == "DT says: quack 1234"
class DT2(DT):
def say_something(self, times):
return "DT2: " + ('QUACK' * times)
def unlucky_number(self):
return -3
class BT(m.B_Tpl):
def say_something(self, times):
return "BT" * times
def unlucky_number(self):
return -7
def lucky_number(self):
return -1.375
obj = BT()
assert obj.say_something(3) == "BTBTBT"
assert obj.unlucky_number() == -7
assert obj.lucky_number() == -1.375
assert obj.say_everything() == "BT -7"
def test_issue_1454():
# Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
m.test_gil()
m.test_gil_from_thread()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_virtual_functions.py | test_virtual_functions.py |
import pytest
from pybind11_tests import operators as m
from pybind11_tests import ConstructorStats
def test_operator_overloading():
v1 = m.Vector2(1, 2)
v2 = m.Vector(3, -1)
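    # m.Vector is evidently the same binding as m.Vector2 under a second name: the
    # ConstructorStats check further down counts both v1 and v2 against m.Vector2.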
assert str(v1) == "[1.000000, 2.000000]"
assert str(v2) == "[3.000000, -1.000000]"
assert str(-v2) == "[-3.000000, 1.000000]"
assert str(v1 + v2) == "[4.000000, 1.000000]"
assert str(v1 - v2) == "[-2.000000, 3.000000]"
assert str(v1 - 8) == "[-7.000000, -6.000000]"
assert str(v1 + 8) == "[9.000000, 10.000000]"
assert str(v1 * 8) == "[8.000000, 16.000000]"
assert str(v1 / 8) == "[0.125000, 0.250000]"
assert str(8 - v1) == "[7.000000, 6.000000]"
assert str(8 + v1) == "[9.000000, 10.000000]"
assert str(8 * v1) == "[8.000000, 16.000000]"
assert str(8 / v1) == "[8.000000, 4.000000]"
assert str(v1 * v2) == "[3.000000, -2.000000]"
assert str(v2 / v1) == "[3.000000, -0.500000]"
v1 += 2 * v2
assert str(v1) == "[7.000000, 0.000000]"
v1 -= v2
assert str(v1) == "[4.000000, 1.000000]"
v1 *= 2
assert str(v1) == "[8.000000, 2.000000]"
v1 /= 16
assert str(v1) == "[0.500000, 0.125000]"
v1 *= v2
assert str(v1) == "[1.500000, -0.125000]"
v2 /= v1
assert str(v2) == "[2.000000, 8.000000]"
assert hash(v1) == 4
cstats = ConstructorStats.get(m.Vector2)
assert cstats.alive() == 2
del v1
assert cstats.alive() == 1
del v2
assert cstats.alive() == 0
assert cstats.values() == ['[1.000000, 2.000000]', '[3.000000, -1.000000]',
'[-3.000000, 1.000000]', '[4.000000, 1.000000]',
'[-2.000000, 3.000000]', '[-7.000000, -6.000000]',
'[9.000000, 10.000000]', '[8.000000, 16.000000]',
'[0.125000, 0.250000]', '[7.000000, 6.000000]',
'[9.000000, 10.000000]', '[8.000000, 16.000000]',
'[8.000000, 4.000000]', '[3.000000, -2.000000]',
'[3.000000, -0.500000]', '[6.000000, -2.000000]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 10
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_operators_notimplemented():
"""#393: need to return NotSupported to ensure correct arithmetic operator behavior"""
c1, c2 = m.C1(), m.C2()
assert c1 + c1 == 11
assert c2 + c2 == 22
assert c2 + c1 == 21
assert c1 + c2 == 12
def test_nested():
"""#328: first member in a class can't be used in operators"""
a = m.NestA()
b = m.NestB()
c = m.NestC()
a += 10
assert m.get_NestA(a) == 13
b.a += 100
assert m.get_NestA(b.a) == 103
c.b.a += 1000
assert m.get_NestA(c.b.a) == 1003
b -= 1
assert m.get_NestB(b) == 3
c.b -= 3
assert m.get_NestB(c.b) == 1
c *= 7
assert m.get_NestC(c) == 35
abase = a.as_base()
assert abase.value == -2
a.as_base().value += 44
assert abase.value == 42
assert c.b.a.as_base().value == -2
c.b.a.as_base().value += 44
assert c.b.a.as_base().value == 42
del c
pytest.gc_collect()
del a # Shouldn't delete while abase is still alive
pytest.gc_collect()
assert abase.value == 42
del abase, b
pytest.gc_collect()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_operator_overloading.py | test_operator_overloading.py |
# Python < 3 needs this: coding=utf-8
import pytest
from pybind11_tests import builtin_casters as m
from pybind11_tests import UserType, IncType
def test_simple_string():
assert m.string_roundtrip("const char *") == "const char *"
def test_unicode_conversion():
"""Tests unicode conversion and error reporting."""
assert m.good_utf8_string() == u"Say utf8‽ 🎂 𝐀"
assert m.good_utf16_string() == u"b‽🎂𝐀z"
assert m.good_utf32_string() == u"a𝐀🎂‽z"
assert m.good_wchar_string() == u"a⸘𝐀z"
if hasattr(m, "has_u8string"):
assert m.good_utf8_u8string() == u"Say utf8‽ 🎂 𝐀"
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_string()
with pytest.raises(UnicodeDecodeError):
m.bad_utf16_string()
# These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)
if hasattr(m, "bad_utf32_string"):
with pytest.raises(UnicodeDecodeError):
m.bad_utf32_string()
if hasattr(m, "bad_wchar_string"):
with pytest.raises(UnicodeDecodeError):
m.bad_wchar_string()
if hasattr(m, "has_u8string"):
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_u8string()
assert m.u8_Z() == 'Z'
assert m.u8_eacute() == u'é'
assert m.u16_ibang() == u'‽'
assert m.u32_mathbfA() == u'𝐀'
assert m.wchar_heart() == u'♥'
if hasattr(m, "has_u8string"):
assert m.u8_char8_Z() == 'Z'
def test_single_char_arguments():
"""Tests failures for passing invalid inputs to char-accepting functions"""
def toobig_message(r):
return "Character code point not in range({0:#x})".format(r)
toolong_message = "Expected a character, but multi-character string found"
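    # Boundaries exercised below: a plain char only holds code points below 0x100, char16_t
    # holds anything below 0x10000 (surrogate pairs are rejected), and char32_t / 4-byte
    # wchar_t accept any code point; multi-character strings are rejected everywhere.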
assert m.ord_char(u'a') == 0x61 # simple ASCII
assert m.ord_char_lv(u'b') == 0x62
assert m.ord_char(u'é') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
assert m.ord_char(u'Ā') == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char(u'ab')
assert str(excinfo.value) == toolong_message
assert m.ord_char16(u'a') == 0x61
assert m.ord_char16(u'é') == 0xE9
assert m.ord_char16_lv(u'ê') == 0xEA
assert m.ord_char16(u'Ā') == 0x100
assert m.ord_char16(u'‽') == 0x203d
assert m.ord_char16(u'♥') == 0x2665
assert m.ord_char16_lv(u'♡') == 0x2661
with pytest.raises(ValueError) as excinfo:
assert m.ord_char16(u'🎂') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char16(u'aa')
assert str(excinfo.value) == toolong_message
assert m.ord_char32(u'a') == 0x61
assert m.ord_char32(u'é') == 0xE9
assert m.ord_char32(u'Ā') == 0x100
assert m.ord_char32(u'‽') == 0x203d
assert m.ord_char32(u'♥') == 0x2665
assert m.ord_char32(u'🎂') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert m.ord_char32(u'aa')
assert str(excinfo.value) == toolong_message
assert m.ord_wchar(u'a') == 0x61
assert m.ord_wchar(u'é') == 0xE9
assert m.ord_wchar(u'Ā') == 0x100
assert m.ord_wchar(u'‽') == 0x203d
assert m.ord_wchar(u'♥') == 0x2665
if m.wchar_size == 2:
with pytest.raises(ValueError) as excinfo:
assert m.ord_wchar(u'🎂') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
else:
assert m.ord_wchar(u'🎂') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert m.ord_wchar(u'aa')
assert str(excinfo.value) == toolong_message
if hasattr(m, "has_u8string"):
assert m.ord_char8(u'a') == 0x61 # simple ASCII
assert m.ord_char8_lv(u'b') == 0x62
assert m.ord_char8(u'é') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
assert m.ord_char8(u'Ā') == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
assert m.ord_char8(u'ab')
assert str(excinfo.value) == toolong_message
def test_bytes_to_string():
"""Tests the ability to pass bytes to C++ string-accepting functions. Note that this is
one-way: the only way to return bytes to Python is via the pybind11::bytes class."""
# Issue #816
import sys
byte = bytes if sys.version_info[0] < 3 else str
assert m.strlen(byte("hi")) == 2
assert m.string_length(byte("world")) == 5
assert m.string_length(byte("a\x00b")) == 3
assert m.strlen(byte("a\x00b")) == 1 # C-string limitation
# passing in a utf8 encoded string should work
assert m.string_length(u'💩'.encode("utf8")) == 4
@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no <string_view>")
def test_string_view(capture):
"""Tests support for C++17 string_view arguments and return values"""
assert m.string_view_chars("Hi") == [72, 105]
assert m.string_view_chars("Hi 🎂") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
assert m.string_view16_chars("Hi 🎂") == [72, 105, 32, 0xd83c, 0xdf82]
assert m.string_view32_chars("Hi 🎂") == [72, 105, 32, 127874]
if hasattr(m, "has_u8string"):
assert m.string_view8_chars("Hi") == [72, 105]
assert m.string_view8_chars("Hi 🎂") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
assert m.string_view_return() == "utf8 secret 🎂"
assert m.string_view16_return() == "utf16 secret 🎂"
assert m.string_view32_return() == "utf32 secret 🎂"
if hasattr(m, "has_u8string"):
assert m.string_view8_return() == "utf8 secret 🎂"
with capture:
m.string_view_print("Hi")
m.string_view_print("utf8 🎂")
m.string_view16_print("utf16 🎂")
m.string_view32_print("utf32 🎂")
assert capture == """
Hi 2
utf8 🎂 9
utf16 🎂 8
utf32 🎂 7
"""
if hasattr(m, "has_u8string"):
with capture:
m.string_view8_print("Hi")
m.string_view8_print("utf8 🎂")
assert capture == """
Hi 2
utf8 🎂 9
"""
with capture:
m.string_view_print("Hi, ascii")
m.string_view_print("Hi, utf8 🎂")
m.string_view16_print("Hi, utf16 🎂")
m.string_view32_print("Hi, utf32 🎂")
assert capture == """
Hi, ascii 9
Hi, utf8 🎂 13
Hi, utf16 🎂 12
Hi, utf32 🎂 11
"""
if hasattr(m, "has_u8string"):
with capture:
m.string_view8_print("Hi, ascii")
m.string_view8_print("Hi, utf8 🎂")
assert capture == """
Hi, ascii 9
Hi, utf8 🎂 13
"""
def test_integer_casting():
"""Issue #929 - out-of-range integer values shouldn't be accepted"""
import sys
assert m.i32_str(-1) == "-1"
assert m.i64_str(-1) == "-1"
assert m.i32_str(2000000000) == "2000000000"
assert m.u32_str(2000000000) == "2000000000"
if sys.version_info < (3,):
assert m.i32_str(long(-1)) == "-1" # noqa: F821 undefined name 'long'
assert m.i64_str(long(-1)) == "-1" # noqa: F821 undefined name 'long'
assert m.i64_str(long(-999999999999)) == "-999999999999" # noqa: F821 undefined name
assert m.u64_str(long(999999999999)) == "999999999999" # noqa: F821 undefined name 'long'
else:
assert m.i64_str(-999999999999) == "-999999999999"
assert m.u64_str(999999999999) == "999999999999"
with pytest.raises(TypeError) as excinfo:
m.u32_str(-1)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.u64_str(-1)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.i32_str(-3000000000)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.i32_str(3000000000)
assert "incompatible function arguments" in str(excinfo.value)
if sys.version_info < (3,):
with pytest.raises(TypeError) as excinfo:
m.u32_str(long(-1)) # noqa: F821 undefined name 'long'
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.u64_str(long(-1)) # noqa: F821 undefined name 'long'
assert "incompatible function arguments" in str(excinfo.value)
def test_tuple(doc):
"""std::pair <-> tuple & std::tuple <-> tuple"""
assert m.pair_passthrough((True, "test")) == ("test", True)
assert m.tuple_passthrough((True, "test", 5)) == (5, "test", True)
# Any sequence can be cast to a std::pair or std::tuple
assert m.pair_passthrough([True, "test"]) == ("test", True)
assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True)
assert m.empty_tuple() == ()
assert doc(m.pair_passthrough) == """
pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]
Return a pair in reversed order
"""
assert doc(m.tuple_passthrough) == """
tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
Return a triple in reversed order
"""
assert m.rvalue_pair() == ("rvalue", "rvalue")
assert m.lvalue_pair() == ("lvalue", "lvalue")
assert m.rvalue_tuple() == ("rvalue", "rvalue", "rvalue")
assert m.lvalue_tuple() == ("lvalue", "lvalue", "lvalue")
assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue")))
assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue")))
def test_builtins_cast_return_none():
"""Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
assert m.return_none_string() is None
assert m.return_none_char() is None
assert m.return_none_bool() is None
assert m.return_none_int() is None
assert m.return_none_float() is None
def test_none_deferred():
"""None passed as various argument types should defer to other overloads"""
assert not m.defer_none_cstring("abc")
assert m.defer_none_cstring(None)
assert not m.defer_none_custom(UserType())
assert m.defer_none_custom(None)
assert m.nodefer_none_void(None)
def test_void_caster():
assert m.load_nullptr_t(None) is None
assert m.cast_nullptr_t() is None
def test_reference_wrapper():
"""std::reference_wrapper for builtin and user types"""
assert m.refwrap_builtin(42) == 420
assert m.refwrap_usertype(UserType(42)) == 42
with pytest.raises(TypeError) as excinfo:
m.refwrap_builtin(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.refwrap_usertype(None)
assert "incompatible function arguments" in str(excinfo.value)
a1 = m.refwrap_list(copy=True)
a2 = m.refwrap_list(copy=True)
assert [x.value for x in a1] == [2, 3]
assert [x.value for x in a2] == [2, 3]
assert not a1[0] is a2[0] and not a1[1] is a2[1]
b1 = m.refwrap_list(copy=False)
b2 = m.refwrap_list(copy=False)
assert [x.value for x in b1] == [1, 2]
assert [x.value for x in b2] == [1, 2]
assert b1[0] is b2[0] and b1[1] is b2[1]
assert m.refwrap_iiw(IncType(5)) == 5
assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10]
def test_complex_cast():
"""std::complex casts"""
assert m.complex_cast(1) == "1.0"
assert m.complex_cast(2j) == "(0.0, 2.0)"
def test_bool_caster():
"""Test bool caster implicit conversions."""
convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
def require_implicit(v):
pytest.raises(TypeError, noconvert, v)
def cant_convert(v):
pytest.raises(TypeError, convert, v)
# straight up bool
assert convert(True) is True
assert convert(False) is False
assert noconvert(True) is True
assert noconvert(False) is False
# None requires implicit conversion
require_implicit(None)
assert convert(None) is False
class A(object):
def __init__(self, x):
self.x = x
def __nonzero__(self):
return self.x
def __bool__(self):
return self.x
class B(object):
pass
# Arbitrary objects are not accepted
cant_convert(object())
cant_convert(B())
# Objects with __nonzero__ / __bool__ defined can be converted
require_implicit(A(True))
assert convert(A(True)) is True
assert convert(A(False)) is False
@pytest.requires_numpy
def test_numpy_bool():
import numpy as np
convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
def cant_convert(v):
pytest.raises(TypeError, convert, v)
# np.bool_ is not considered implicit
assert convert(np.bool_(True)) is True
assert convert(np.bool_(False)) is False
assert noconvert(np.bool_(True)) is True
assert noconvert(np.bool_(False)) is False
cant_convert(np.zeros(2, dtype='int'))
def test_int_long():
"""In Python 2, a C++ int should return a Python int rather than long
if possible: longs are not always accepted where ints are used (such
as the argument to sys.exit()). A C++ long long is always a Python
long."""
import sys
must_be_long = type(getattr(sys, 'maxint', 1) + 1)
assert isinstance(m.int_cast(), int)
assert isinstance(m.long_cast(), int)
assert isinstance(m.longlong_cast(), must_be_long)
def test_void_caster_2():
assert m.test_void_caster()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_builtin_casters.py | test_builtin_casters.py |
from pybind11_tests import tagbased_polymorphic as m
def test_downcast():
zoo = m.create_zoo()
assert [type(animal) for animal in zoo] == [
m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther
]
assert [animal.name for animal in zoo] == [
"Fido", "Ginger", "Hertzl", "Tiger", "Leo"
]
zoo[1].sound = "woooooo"
assert [dog.bark() for dog in zoo[:3]] == [
"Labrador Fido goes WOOF!",
"Dog Ginger goes woooooo",
"Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles"
]
assert [cat.purr() for cat in zoo[3:]] == ["mrowr", "mrrrRRRRRR"]
zoo[0].excitement -= 1000
assert zoo[0].excitement == 14000
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_tagbased_polymorphic.py | test_tagbased_polymorphic.py |
import pytest
import sys
from pybind11_tests import stl_binders as m
with pytest.suppress(ImportError):
import numpy as np
def test_vector_int():
v_int = m.VectorInt([0, 0])
assert len(v_int) == 2
assert bool(v_int) is True
# test construction from a generator
v_int1 = m.VectorInt(x for x in range(5))
assert v_int1 == m.VectorInt([0, 1, 2, 3, 4])
v_int2 = m.VectorInt([0, 0])
assert v_int == v_int2
v_int2[1] = 1
assert v_int != v_int2
v_int2.append(2)
v_int2.insert(0, 1)
v_int2.insert(0, 2)
v_int2.insert(0, 3)
v_int2.insert(6, 3)
assert str(v_int2) == "VectorInt[3, 2, 1, 0, 1, 2, 3]"
with pytest.raises(IndexError):
v_int2.insert(8, 4)
v_int.append(99)
v_int2[2:-2] = v_int
assert v_int2 == m.VectorInt([3, 2, 0, 0, 99, 2, 3])
del v_int2[1:3]
assert v_int2 == m.VectorInt([3, 0, 99, 2, 3])
del v_int2[0]
assert v_int2 == m.VectorInt([0, 99, 2, 3])
v_int2.extend(m.VectorInt([4, 5]))
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5])
v_int2.extend([6, 7])
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
# test error handling, and that the vector is unchanged
with pytest.raises(RuntimeError):
v_int2.extend([8, 'a'])
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
# test extending from a generator
v_int2.extend(x for x in range(5))
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4])
# test negative indexing
assert v_int2[-1] == 4
# insert with negative index
v_int2.insert(-1, 88)
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88, 4])
# delete negative index
del v_int2[-1]
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88])
v_int2.clear()
assert len(v_int2) == 0
# Related to PyPy's buffer protocol (the test below is skipped on PyPy).
@pytest.unsupported_on_pypy
def test_vector_buffer():
b = bytearray([1, 2, 3, 4])
v = m.VectorUChar(b)
assert v[1] == 2
v[2] = 5
mv = memoryview(v) # We expose the buffer interface
if sys.version_info.major > 2:
assert mv[2] == 5
mv[2] = 6
else:
assert mv[2] == '\x05'
mv[2] = '\x06'
assert v[2] == 6
with pytest.raises(RuntimeError) as excinfo:
m.create_undeclstruct() # Undeclared struct contents, no buffer interface
assert "NumPy type info missing for " in str(excinfo.value)
@pytest.unsupported_on_pypy
@pytest.requires_numpy
def test_vector_buffer_numpy():
a = np.array([1, 2, 3, 4], dtype=np.int32)
with pytest.raises(TypeError):
m.VectorInt(a)
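    # VectorInt evidently binds a vector of unsigned int, so the int32 buffer above is
    # rejected while the np.uintc row/column below are accepted; the np.asarray view is
    # shared rather than copied, so writes through it are visible in the vector.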
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.uintc)
v = m.VectorInt(a[0, :])
assert len(v) == 4
assert v[2] == 3
ma = np.asarray(v)
ma[2] = 5
assert v[2] == 5
v = m.VectorInt(a[:, 1])
assert len(v) == 3
assert v[2] == 10
v = m.get_vectorstruct()
assert v[0].x == 5
ma = np.asarray(v)
ma[1]['x'] = 99
assert v[1].x == 99
v = m.VectorStruct(np.zeros(3, dtype=np.dtype([('w', 'bool'), ('x', 'I'),
('y', 'float64'), ('z', 'bool')], align=True)))
assert len(v) == 3
def test_vector_bool():
import pybind11_cross_module_tests as cm
vv_c = cm.VectorBool()
for i in range(10):
vv_c.append(i % 2 == 0)
for i in range(10):
assert vv_c[i] == (i % 2 == 0)
assert str(vv_c) == "VectorBool[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]"
def test_vector_custom():
v_a = m.VectorEl()
v_a.append(m.El(1))
v_a.append(m.El(2))
assert str(v_a) == "VectorEl[El{1}, El{2}]"
vv_a = m.VectorVectorEl()
vv_a.append(v_a)
vv_b = vv_a[0]
assert str(vv_b) == "VectorEl[El{1}, El{2}]"
def test_map_string_double():
mm = m.MapStringDouble()
mm['a'] = 1
mm['b'] = 2.5
assert list(mm) == ['a', 'b']
assert list(mm.items()) == [('a', 1), ('b', 2.5)]
assert str(mm) == "MapStringDouble{a: 1, b: 2.5}"
um = m.UnorderedMapStringDouble()
um['ua'] = 1.1
um['ub'] = 2.6
assert sorted(list(um)) == ['ua', 'ub']
assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
assert "UnorderedMapStringDouble" in str(um)
def test_map_string_double_const():
mc = m.MapStringDoubleConst()
mc['a'] = 10
mc['b'] = 20.5
assert str(mc) == "MapStringDoubleConst{a: 10, b: 20.5}"
umc = m.UnorderedMapStringDoubleConst()
umc['a'] = 11
umc['b'] = 21.5
str(umc)
def test_noncopyable_containers():
# std::vector
vnc = m.get_vnc(5)
for i in range(0, 5):
assert vnc[i].value == i + 1
for i, j in enumerate(vnc, start=1):
assert j.value == i
# std::deque
dnc = m.get_dnc(5)
for i in range(0, 5):
assert dnc[i].value == i + 1
i = 1
for j in dnc:
        assert j.value == i
i += 1
# std::map
mnc = m.get_mnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# std::unordered_map
mnc = m.get_umnc(5)
for i in range(1, 6):
assert mnc[i].value == 10 * i
vsum = 0
for k, v in mnc.items():
assert v.value == 10 * k
vsum += v.value
assert vsum == 150
# nested std::map<std::vector>
nvnc = m.get_nvnc(5)
for i in range(1, 6):
for j in range(0, 5):
assert nvnc[i][j].value == j + 1
for k, v in nvnc.items():
for i, j in enumerate(v, start=1):
assert j.value == i
# nested std::map<std::map>
nmnc = m.get_nmnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert nmnc[i][j].value == 10 * j
vsum = 0
for k_o, v_o in nmnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
# nested std::unordered_map<std::unordered_map>
numnc = m.get_numnc(5)
for i in range(1, 6):
for j in range(10, 60, 10):
assert numnc[i][j].value == 10 * j
vsum = 0
for k_o, v_o in numnc.items():
for k_i, v_i in v_o.items():
assert v_i.value == 10 * k_i
vsum += v_i.value
assert vsum == 7500
def test_map_delitem():
mm = m.MapStringDouble()
mm['a'] = 1
mm['b'] = 2.5
assert list(mm) == ['a', 'b']
assert list(mm.items()) == [('a', 1), ('b', 2.5)]
del mm['a']
assert list(mm) == ['b']
assert list(mm.items()) == [('b', 2.5)]
um = m.UnorderedMapStringDouble()
um['ua'] = 1.1
um['ub'] = 2.6
assert sorted(list(um)) == ['ua', 'ub']
assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
del um['ua']
assert sorted(list(um)) == ['ub']
assert sorted(list(um.items())) == [('ub', 2.6)]
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_stl_binders.py | test_stl_binders.py |
import pytest
from pybind11_tests import smart_ptr as m
from pybind11_tests import ConstructorStats
def test_smart_ptr(capture):
# Object1
for i, o in enumerate([m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1):
assert o.getRefCount() == 1
with capture:
m.print_object_1(o)
m.print_object_2(o)
m.print_object_3(o)
m.print_object_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * 4
for i, o in enumerate([m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7],
start=4):
print(o)
with capture:
if not isinstance(o, int):
m.print_object_1(o)
m.print_object_2(o)
m.print_object_3(o)
m.print_object_4(o)
m.print_myobject1_1(o)
m.print_myobject1_2(o)
m.print_myobject1_3(o)
m.print_myobject1_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8)
cstats = ConstructorStats.get(m.MyObject1)
assert cstats.alive() == 0
expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4
assert cstats.values() == expected_values
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object2
for i, o in zip([8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]):
print(o)
with capture:
m.print_myobject2_1(o)
m.print_myobject2_2(o)
m.print_myobject2_3(o)
m.print_myobject2_4(o)
assert capture == "MyObject2[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(m.MyObject2)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object3
for i, o in zip([9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]):
print(o)
with capture:
m.print_myobject3_1(o)
m.print_myobject3_2(o)
m.print_myobject3_3(o)
m.print_myobject3_4(o)
assert capture == "MyObject3[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(m.MyObject3)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object
cstats = ConstructorStats.get(m.Object)
assert cstats.alive() == 0
assert cstats.values() == []
assert cstats.default_constructions == 10
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# ref<>
cstats = m.cstats_ref()
assert cstats.alive() == 0
assert cstats.values() == ['from pointer'] * 10
assert cstats.default_constructions == 30
assert cstats.copy_constructions == 12
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 30
assert cstats.move_assignments == 0
def test_smart_ptr_refcounting():
assert m.test_object1_refcounting()
def test_unique_nodelete():
o = m.MyObject4(23)
assert o.value == 23
cstats = ConstructorStats.get(m.MyObject4)
assert cstats.alive() == 1
del o
assert cstats.alive() == 1 # Leak, but that's intentional
def test_unique_nodelete4a():
o = m.MyObject4a(23)
assert o.value == 23
cstats = ConstructorStats.get(m.MyObject4a)
assert cstats.alive() == 1
del o
assert cstats.alive() == 1 # Leak, but that's intentional
def test_unique_deleter():
o = m.MyObject4b(23)
assert o.value == 23
cstats4a = ConstructorStats.get(m.MyObject4a)
assert cstats4a.alive() == 2 # Two because of previous test
cstats4b = ConstructorStats.get(m.MyObject4b)
assert cstats4b.alive() == 1
del o
assert cstats4a.alive() == 1 # Should now only be one leftover from previous test
assert cstats4b.alive() == 0 # Should be deleted
def test_large_holder():
o = m.MyObject5(5)
assert o.value == 5
cstats = ConstructorStats.get(m.MyObject5)
assert cstats.alive() == 1
del o
assert cstats.alive() == 0
def test_shared_ptr_and_references():
s = m.SharedPtrRef()
stats = ConstructorStats.get(m.A)
assert stats.alive() == 2
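    # Pattern under test: objects that are not owned by a holder can be passed where a
    # plain reference is expected (set_ref) but not where a shared_ptr holder is expected
    # (set_holder), which triggers the "non-held to held" error checked below.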
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false)
assert stats.alive() == 2
assert s.set_ref(ref)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(ref)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
def test_shared_ptr_from_this_and_references():
s = m.SharedFromThisRef()
stats = ConstructorStats.get(m.B)
assert stats.alive() == 2
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)
assert stats.alive() == 2
assert s.set_ref(ref)
assert s.set_holder(ref) # std::enable_shared_from_this can create a holder from a reference
bad_wp = s.bad_wp # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)
assert stats.alive() == 2
assert s.set_ref(bad_wp)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(bad_wp)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, bad_wp, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
z = m.SharedFromThisVirt.get()
y = m.SharedFromThisVirt.get()
assert y is z
def test_move_only_holder():
a = m.TypeWithMoveOnlyHolder.make()
stats = ConstructorStats.get(m.TypeWithMoveOnlyHolder)
assert stats.alive() == 1
del a
assert stats.alive() == 0
def test_holder_with_addressof_operator():
    # This test must not throw an exception from C++.
a = m.TypeForHolderWithAddressOf.make()
a.print_object_1()
a.print_object_2()
a.print_object_3()
a.print_object_4()
stats = ConstructorStats.get(m.TypeForHolderWithAddressOf)
assert stats.alive() == 1
np = m.TypeForHolderWithAddressOf.make()
assert stats.alive() == 2
del a
assert stats.alive() == 1
del np
assert stats.alive() == 0
b = m.TypeForHolderWithAddressOf.make()
c = b
assert b.get() is c.get()
assert stats.alive() == 1
del b
assert stats.alive() == 1
del c
assert stats.alive() == 0
def test_move_only_holder_with_addressof_operator():
a = m.TypeForMoveOnlyHolderWithAddressOf.make()
a.print_object()
stats = ConstructorStats.get(m.TypeForMoveOnlyHolderWithAddressOf)
assert stats.alive() == 1
a.value = 42
assert a.value == 42
del a
assert stats.alive() == 0
def test_smart_ptr_from_default():
instance = m.HeldByDefaultHolder()
with pytest.raises(RuntimeError) as excinfo:
m.HeldByDefaultHolder.load_shared_ptr(instance)
assert "Unable to load a custom holder type from a " \
"default-holder instance" in str(excinfo.value)
def test_shared_ptr_gc():
"""#187: issue involving std::shared_ptr<> return value policy & garbage collection"""
el = m.ElementList()
for i in range(10):
el.add(m.ElementA(i))
pytest.gc_collect()
for i, v in enumerate(el.get()):
assert i == v.value()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_smart_ptr.py | test_smart_ptr.py |
import pytest
from pybind11_tests import sequences_and_iterators as m
from pybind11_tests import ConstructorStats
def isclose(a, b, rel_tol=1e-05, abs_tol=0.0):
"""Like math.isclose() from Python 3.5"""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0):
return all(isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list))
def test_generalized_iterators():
assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)]
assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)]
assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == []
assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3]
assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1]
assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == []
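    # In short: nonzero() yields (key, value) pairs and nonzero_keys() only the keys, and
    # both iterators stop at the first pair that contains a zero.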
# __next__ must continue to raise StopIteration
it = m.IntPairs([(0, 0)]).nonzero()
for _ in range(3):
with pytest.raises(StopIteration):
next(it)
it = m.IntPairs([(0, 0)]).nonzero_keys()
for _ in range(3):
with pytest.raises(StopIteration):
next(it)
def test_sliceable():
sliceable = m.Sliceable(100)
assert sliceable[::] == (0, 100, 1)
assert sliceable[10::] == (10, 100, 1)
assert sliceable[:10:] == (0, 10, 1)
assert sliceable[::10] == (0, 100, 10)
assert sliceable[-10::] == (90, 100, 1)
assert sliceable[:-10:] == (0, 90, 1)
assert sliceable[::-10] == (99, -1, -10)
assert sliceable[50:60:1] == (50, 60, 1)
assert sliceable[50:60:-1] == (50, 60, -1)
def test_sequence():
cstats = ConstructorStats.get(m.Sequence)
s = m.Sequence(5)
assert cstats.values() == ['of size', '5']
assert "Sequence" in repr(s)
assert len(s) == 5
assert s[0] == 0 and s[3] == 0
assert 12.34 not in s
s[0], s[3] = 12.34, 56.78
assert 12.34 in s
assert isclose(s[0], 12.34) and isclose(s[3], 56.78)
rev = reversed(s)
assert cstats.values() == ['of size', '5']
rev2 = s[::-1]
assert cstats.values() == ['of size', '5']
it = iter(m.Sequence(0))
for _ in range(3): # __next__ must continue to raise StopIteration
with pytest.raises(StopIteration):
next(it)
assert cstats.values() == ['of size', '0']
expected = [0, 56.78, 0, 0, 12.34]
assert allclose(rev, expected)
assert allclose(rev2, expected)
assert rev == rev2
rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
assert cstats.values() == ['of size', '3', 'from std::vector']
assert allclose(rev, [2, 56.78, 2, 0, 2])
assert cstats.alive() == 4
del it
assert cstats.alive() == 3
del s
assert cstats.alive() == 2
del rev
assert cstats.alive() == 1
del rev2
assert cstats.alive() == 0
assert cstats.values() == []
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 1
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_map_iterator():
sm = m.StringMap({'hi': 'bye', 'black': 'white'})
assert sm['hi'] == 'bye'
assert len(sm) == 2
assert sm['black'] == 'white'
with pytest.raises(KeyError):
assert sm['orange']
sm['orange'] = 'banana'
assert sm['orange'] == 'banana'
expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}
for k in sm:
assert sm[k] == expected[k]
for k, v in sm.items():
assert v == expected[k]
it = iter(m.StringMap({}))
for _ in range(3): # __next__ must continue to raise StopIteration
with pytest.raises(StopIteration):
next(it)
def test_python_iterator_in_cpp():
t = (1, 2, 3)
assert m.object_to_list(t) == [1, 2, 3]
assert m.object_to_list(iter(t)) == [1, 2, 3]
assert m.iterator_to_list(iter(t)) == [1, 2, 3]
with pytest.raises(TypeError) as excinfo:
m.object_to_list(1)
assert "object is not iterable" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.iterator_to_list(1)
assert "incompatible function arguments" in str(excinfo.value)
def bad_next_call():
raise RuntimeError("py::iterator::advance() should propagate errors")
with pytest.raises(RuntimeError) as excinfo:
m.iterator_to_list(iter(bad_next_call, None))
assert str(excinfo.value) == "py::iterator::advance() should propagate errors"
lst = [1, None, 0, None]
assert m.count_none(lst) == 2
assert m.find_none(lst) is True
assert m.count_nonzeros({"a": 0, "b": 1, "c": 2}) == 2
r = range(5)
assert all(m.tuple_iterator(tuple(r)))
assert all(m.list_iterator(list(r)))
assert all(m.sequence_iterator(r))
def test_iterator_passthrough():
"""#181: iterator passthrough did not compile"""
from pybind11_tests.sequences_and_iterators import iterator_passthrough
assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]
def test_iterator_rvp():
"""#388: Can't make iterators via make_iterator() with different r/v policies """
import pybind11_tests.sequences_and_iterators as m
assert list(m.make_iterator_1()) == [1, 2, 3]
assert list(m.make_iterator_2()) == [1, 2, 3]
assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2()))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_sequences_and_iterators.py | test_sequences_and_iterators.py |
from pybind11_tests import docstring_options as m
def test_docstring_options():
# options.disable_function_signatures()
assert not m.test_function1.__doc__
assert m.test_function2.__doc__ == "A custom docstring"
# docstring specified on just the first overload definition:
assert m.test_overloaded1.__doc__ == "Overload docstring"
# docstring on both overloads:
assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2"
# docstring on only second overload:
assert m.test_overloaded3.__doc__ == "Overload docstr"
# options.enable_function_signatures()
assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None")
assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None")
assert m.test_function4.__doc__ .endswith("A custom docstring\n")
# options.disable_function_signatures()
# options.disable_user_defined_docstrings()
assert not m.test_function5.__doc__
# nested options.enable_user_defined_docstrings()
assert m.test_function6.__doc__ == "A custom docstring"
# RAII destructor
assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None")
assert m.test_function7.__doc__ .endswith("A custom docstring\n")
# Suppression of user-defined docstrings for non-function objects
assert not m.DocstringTestFoo.__doc__
assert not m.DocstringTestFoo.value_prop.__doc__
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_docstring_options.py | test_docstring_options.py |
import re
import pytest
from pybind11_tests import numpy_dtypes as m
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
@pytest.fixture(scope='module')
def simple_dtype():
ld = np.dtype('longdouble')
return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],
'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})
@pytest.fixture(scope='module')
def packed_dtype():
return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])
def dt_fmt():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
return ("{{'names':['bool_','uint_','float_','ldbl_'],"
" 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
" 'offsets':[0,4,8,{}], 'itemsize':{}}}")
def simple_dtype_fmt():
ld = np.dtype('longdouble')
simple_ld_off = 12 + 4 * (ld.alignment > 4)
return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
def packed_dtype_fmt():
from sys import byteorder
return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>')
def partial_ld_offset():
return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * (
np.dtype('longdouble').alignment > 8)
def partial_dtype_fmt():
ld = np.dtype('longdouble')
partial_ld_off = partial_ld_offset()
return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
def partial_nested_fmt():
ld = np.dtype('longdouble')
partial_nested_off = 8 + 8 * (ld.alignment > 8)
partial_ld_off = partial_ld_offset()
partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
partial_dtype_fmt(), partial_nested_off, partial_nested_size)
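# The helpers above reproduce the layout NumPy chooses for the aligned ("partial")
# structs: field offsets and itemsize depend on the platform alignment of uint64 and
# longdouble, so they are computed rather than hard-coded.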
def assert_equal(actual, expected_data, expected_dtype):
np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype))
def test_format_descriptors():
with pytest.raises(RuntimeError) as excinfo:
m.get_format_unbound()
assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
ld = np.dtype('longdouble')
ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char
ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
dbl = np.dtype('double')
partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" +
str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +
"xg:ldbl_:}")
nested_extra = str(max(8, ld.alignment))
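    # The expected strings use PEP 3118-style codes: '?' bool, 'I' unsigned int, 'f' float,
    # 'g' long double, 'Zf'/'Zd' complex float/double, 'x' a pad byte (with an optional
    # repeat count), and '^T{...}' a struct whose fields are written as '<code>:name:'.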
assert m.print_format_descriptors() == [
ss_fmt,
"^T{?:bool_:I:uint_:f:float_:g:ldbl_:}",
"^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}",
partial_fmt,
"^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
"^T{3s:a:3s:b:}",
"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
'^T{q:e1:B:e2:}',
'^T{Zf:cflt:Zd:cdbl:}'
]
def test_dtype(simple_dtype):
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
assert m.print_dtypes() == [
simple_dtype_fmt(),
packed_dtype_fmt(),
"[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()),
partial_dtype_fmt(),
partial_nested_fmt(),
"[('a', 'S3'), ('b', 'S3')]",
("{{'names':['a','b','c','d'], " +
"'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('" + e + "f4', (4, 2))], " +
"'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e),
"[('e1', '" + e + "i8'), ('e2', 'u1')]",
"[('x', 'i1'), ('y', '" + e + "u8')]",
"[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]"
]
d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],
'offsets': [1, 10], 'itemsize': 20})
d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),
np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]
assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,
np.dtype('int32').itemsize, simple_dtype.itemsize]
assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))
def test_recarray(simple_dtype, packed_dtype):
elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
arr = func(0)
assert arr.dtype == dtype
assert_equal(arr, [], simple_dtype)
assert_equal(arr, [], packed_dtype)
arr = func(3)
assert arr.dtype == dtype
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
if dtype == simple_dtype:
assert m.print_rec_simple(arr) == [
"s:0,0,0,-0",
"s:1,1,1.5,-2.5",
"s:0,2,3,-5"
]
else:
assert m.print_rec_packed(arr) == [
"p:0,0,0,-0",
"p:1,1,1.5,-2.5",
"p:0,2,3,-5"
]
nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
arr = m.create_rec_nested(0)
assert arr.dtype == nested_dtype
assert_equal(arr, [], nested_dtype)
arr = m.create_rec_nested(3)
assert arr.dtype == nested_dtype
assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)
assert m.print_rec_nested(arr) == [
"n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
"n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
"n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5"
]
arr = m.create_rec_partial(3)
assert str(arr.dtype) == partial_dtype_fmt()
partial_dtype = arr.dtype
assert '' not in arr.dtype.fields
assert partial_dtype.itemsize > simple_dtype.itemsize
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
arr = m.create_rec_partial_nested(3)
assert str(arr.dtype) == partial_nested_fmt()
assert '' not in arr.dtype.fields
assert '' not in arr.dtype.fields['a'][0].fields
assert arr.dtype.itemsize > partial_dtype.itemsize
np.testing.assert_equal(arr['a'], m.create_rec_partial(3))
def test_array_constructors():
data = np.arange(1, 7, dtype='int32')
for i in range(8):
np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))
np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))
for i in range(5):
np.testing.assert_array_equal(m.test_array_ctors(30 + i), data)
np.testing.assert_array_equal(m.test_array_ctors(40 + i), data)
def test_string_array():
arr = m.create_string_array(True)
assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]"
assert m.print_string_array(arr) == [
"a='',b=''",
"a='a',b='a'",
"a='ab',b='ab'",
"a='abc',b='abc'"
]
dtype = arr.dtype
assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']
assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']
arr = m.create_string_array(False)
assert dtype == arr.dtype
def test_array_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_array_array(3)
assert str(arr.dtype) == (
"{{'names':['a','b','c','d'], " +
"'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " +
"'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e)
assert m.print_array_array(arr) == [
"a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," +
"c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
"a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," +
"c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
"a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," +
"c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
]
assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],
[b'WXYZ', b'GHIJ', b'QRST'],
[b'STUV', b'CDEF', b'MNOP']]
assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
assert m.create_array_array(0).dtype == arr.dtype
def test_enum_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_enum_array(3)
dtype = arr.dtype
assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])
assert m.print_enum_array(arr) == [
"e1=A,e2=X",
"e1=B,e2=Y",
"e1=A,e2=X"
]
assert arr['e1'].tolist() == [-1, 1, -1]
assert arr['e2'].tolist() == [1, 2, 1]
assert m.create_enum_array(0).dtype == dtype
def test_complex_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_complex_array(3)
dtype = arr.dtype
assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])
assert m.print_complex_array(arr) == [
"c:(0,0.25),(0.5,0.75)",
"c:(1,1.25),(1.5,1.75)",
"c:(2,2.25),(2.5,2.75)"
]
assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
assert m.create_complex_array(0).dtype == dtype
def test_signature(doc):
assert doc(m.create_rec_nested) == \
"create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
def test_scalar_conversion():
n = 3
arrays = [m.create_rec_simple(n), m.create_rec_packed(n),
m.create_rec_nested(n), m.create_enum_array(n)]
funcs = [m.f_simple, m.f_packed, m.f_nested]
for i, func in enumerate(funcs):
for j, arr in enumerate(arrays):
if i == j and i < 2:
assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
else:
with pytest.raises(TypeError) as excinfo:
func(arr[0])
assert 'incompatible function arguments' in str(excinfo.value)
def test_register_dtype():
with pytest.raises(RuntimeError) as excinfo:
m.register_dtype()
assert 'dtype is already registered' in str(excinfo.value)
@pytest.unsupported_on_pypy
def test_str_leak():
from sys import getrefcount
fmt = "f4"
pytest.gc_collect()
start = getrefcount(fmt)
d = m.dtype_wrapper(fmt)
assert d is np.dtype("f4")
del d
pytest.gc_collect()
assert getrefcount(fmt) == start
def test_compare_buffer_info():
assert all(m.compare_buffer_info())
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_numpy_dtypes.py | test_numpy_dtypes.py |
import io
import struct
import sys
import pytest
from pybind11_tests import buffers as m
from pybind11_tests import ConstructorStats
PY3 = sys.version_info[0] >= 3
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
def test_from_python():
with pytest.raises(RuntimeError) as excinfo:
m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array
assert str(excinfo.value) == "Incompatible buffer format!"
m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
m4 = m.Matrix(m3)
for i in range(m4.rows()):
for j in range(m4.cols()):
assert m3[i, j] == m4[i, j]
cstats = ConstructorStats.get(m.Matrix)
assert cstats.alive() == 1
del m3, m4
assert cstats.alive() == 0
assert cstats.values() == ["2x3 matrix"]
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Don't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# PyPy: Memory leak in the "np.array(m, copy=False)" call
# https://bitbucket.org/pypy/pypy/issues/2444
@pytest.unsupported_on_pypy
def test_to_python():
mat = m.Matrix(5, 4)
assert memoryview(mat).shape == (5, 4)
assert mat[2, 3] == 0
mat[2, 3] = 4.0
mat[3, 2] = 7.0
assert mat[2, 3] == 4
assert mat[3, 2] == 7
assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )
assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )
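    # struct.unpack_from reads the raw float32 storage via the buffer protocol: element
    # [3][2] of the 5x4 row-major matrix sits at index 3*4+2 == 14, i.e. byte offset
    # 14 * 4, which is the (3 * 4 + 2) * 4 used above (and likewise for [2][3]).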
mat2 = np.array(mat, copy=False)
assert mat2.shape == (5, 4)
assert abs(mat2).sum() == 11
assert mat2[2, 3] == 4 and mat2[3, 2] == 7
mat2[2, 3] = 5
assert mat2[2, 3] == 5
cstats = ConstructorStats.get(m.Matrix)
assert cstats.alive() == 1
del mat
pytest.gc_collect()
assert cstats.alive() == 1
del mat2 # holds a mat reference
pytest.gc_collect()
assert cstats.alive() == 0
assert cstats.values() == ["5x4 matrix"]
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Don't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
@pytest.unsupported_on_pypy
def test_inherited_protocol():
"""SquareMatrix is derived from Matrix and inherits the buffer protocol"""
matrix = m.SquareMatrix(5)
assert memoryview(matrix).shape == (5, 5)
assert np.asarray(matrix).shape == (5, 5)
@pytest.unsupported_on_pypy
def test_pointer_to_member_fn():
for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
buf = cls()
buf.value = 0x12345678
value = struct.unpack('i', bytearray(buf))[0]
assert value == 0x12345678
@pytest.unsupported_on_pypy
def test_readonly_buffer():
buf = m.BufferReadOnly(0x64)
view = memoryview(buf)
assert view[0] == 0x64 if PY3 else b'd'
assert view.readonly
@pytest.unsupported_on_pypy
def test_selective_readonly_buffer():
buf = m.BufferReadOnlySelect()
memoryview(buf)[0] = 0x64 if PY3 else b'd'
assert buf.value == 0x64
io.BytesIO(b'A').readinto(buf)
assert buf.value == ord(b'A')
buf.readonly = True
with pytest.raises(TypeError):
memoryview(buf)[0] = 0 if PY3 else b'\0'
with pytest.raises(TypeError):
io.BytesIO(b'1').readinto(buf)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_buffers.py | test_buffers.py |
import pytest
from pybind11_tests import callbacks as m
from threading import Thread
def test_callbacks():
from functools import partial
def func1():
return "func1"
def func2(a, b, c, d):
return "func2", a, b, c, d
def func3(a):
return "func3({})".format(a)
assert m.test_callback1(func1) == "func1"
assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"
f = m.test_callback4()
assert f(43) == 44
f = m.test_callback5()
assert f(number=43) == 44
def test_bound_method_callback():
# Bound Python method:
class MyClass:
def double(self, val):
return 2 * val
z = MyClass()
assert m.test_callback3(z.double) == "func(43) = 86"
z = m.CppBoundMethodTest()
assert m.test_callback3(z.triple) == "func(43) = 129"
def test_keyword_args_and_generalized_unpacking():
def f(*args, **kwargs):
return args, kwargs
assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2})
assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
assert m.test_unpacking_and_keywords2(f) == (
("positional", 1, 2, 3, 4, 5),
{"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error1(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error2(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error1(f)
assert "Unable to convert call argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error2(f)
assert "Unable to convert call argument" in str(excinfo.value)
def test_lambda_closure_cleanup():
m.test_cleanup()
cstats = m.payload_cstats()
assert cstats.alive() == 0
assert cstats.copy_constructions == 1
assert cstats.move_constructions >= 1
def test_cpp_function_roundtrip():
"""Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==
"matches dummy_function: eval(1) = 2")
assert m.roundtrip(None, expect_none=True) is None
assert (m.test_dummy_function(lambda x: x + 2) ==
"can't convert to function pointer: eval(1) = 3")
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(m.dummy_function2)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(lambda x, y: x + y)
assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument",
"takes exactly 2 arguments"))
def test_function_signatures(doc):
assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
def test_movable_object():
assert m.callback_with_movable(lambda _: None) is True
def test_async_callbacks():
# serves as state for async callback
class Item:
def __init__(self, value):
self.value = value
res = []
# generate stateful lambda that will store result in `res`
def gen_f():
s = Item(3)
return lambda j: res.append(s.value + j)
# do some work async
work = [1, 2, 3, 4]
m.test_async_callback(gen_f(), work)
# wait until work is done
from time import sleep
sleep(0.5)
assert sum(res) == sum([x + 3 for x in work])
def test_async_async_callbacks():
t = Thread(target=test_async_callbacks)
t.start()
t.join()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_callbacks.py | test_callbacks.py |
import pytest
from pybind11_tests import numpy_vectorize as m
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
def test_vectorize(capture):
assert np.isclose(m.vectorized_func3(np.array(3 + 7j)), [6 + 14j])
for f in [m.vectorized_func, m.vectorized_func2]:
with capture:
assert np.isclose(f(1, 2, 3), 6)
assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
with capture:
assert np.isclose(f(np.array(1), np.array(2), 3), 6)
assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
with capture:
assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36])
assert capture == """
my_func(x:int=1, y:float=2, z:float=3)
my_func(x:int=3, y:float=4, z:float=3)
"""
with capture:
a = np.array([[1, 2], [3, 4]], order='F')
b = np.array([[10, 20], [30, 40]], order='F')
c = 3
result = f(a, b, c)
assert np.allclose(result, a * b * c)
assert result.flags.f_contiguous
            # All inputs are F order and full or singletons, so the result is in col-major order:
assert capture == """
my_func(x:int=1, y:float=10, z:float=3)
my_func(x:int=3, y:float=30, z:float=3)
my_func(x:int=2, y:float=20, z:float=3)
my_func(x:int=4, y:float=40, z:float=3)
"""
with capture:
a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=3)
my_func(x:int=3, y:float=4, z:float=3)
my_func(x:int=5, y:float=6, z:float=3)
my_func(x:int=7, y:float=8, z:float=3)
my_func(x:int=9, y:float=10, z:float=3)
my_func(x:int=11, y:float=12, z:float=3)
"""
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=3, z:float=2)
my_func(x:int=3, y:float=4, z:float=2)
my_func(x:int=4, y:float=2, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=4, z:float=2)
"""
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
assert capture == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
def test_type_selection():
assert m.selective_func(np.array([1], dtype=np.int32)) == "Int branch taken."
assert m.selective_func(np.array([1.0], dtype=np.float32)) == "Float branch taken."
assert m.selective_func(np.array([1.0j], dtype=np.complex64)) == "Complex float branch taken."
def test_docs(doc):
assert doc(m.vectorized_func) == """
vectorized_func(arg0: numpy.ndarray[int32], arg1: numpy.ndarray[float32], arg2: numpy.ndarray[float64]) -> object
""" # noqa: E501 line too long
def test_trivial_broadcasting():
trivial, vectorized_is_trivial = m.trivial, m.vectorized_is_trivial
assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial
assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial
assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial
assert trivial.c_trivial == vectorized_is_trivial(
np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3)
assert vectorized_is_trivial(
np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial
assert vectorized_is_trivial(
np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial
z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32')
z2 = np.array(z1, dtype='float32')
z3 = np.array(z1, dtype='float64')
assert vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial
assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial
assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial
assert vectorized_is_trivial(z1, z2, 1) == trivial.c_trivial
assert vectorized_is_trivial(z1[::2, ::2], 1, 1) == trivial.non_trivial
assert vectorized_is_trivial(1, 1, z1[::2, ::2]) == trivial.c_trivial
assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial
assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial
y1 = np.array(z1, order='F')
y2 = np.array(y1)
y3 = np.array(y1)
assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial
assert vectorized_is_trivial(y1, 1, 1) == trivial.f_trivial
assert vectorized_is_trivial(1, y2, 1) == trivial.f_trivial
assert vectorized_is_trivial(1, 1, y3) == trivial.f_trivial
assert vectorized_is_trivial(y1, z2, 1) == trivial.non_trivial
assert vectorized_is_trivial(z1[1::4, 1::4], y2, 1) == trivial.f_trivial
assert vectorized_is_trivial(y1[1::4, 1::4], z2, 1) == trivial.c_trivial
assert m.vectorized_func(z1, z2, z3).flags.c_contiguous
assert m.vectorized_func(y1, y2, y3).flags.f_contiguous
assert m.vectorized_func(z1, 1, 1).flags.c_contiguous
assert m.vectorized_func(1, y2, 1).flags.f_contiguous
assert m.vectorized_func(z1[1::4, 1::4], y2, 1).flags.f_contiguous
assert m.vectorized_func(y1[1::4, 1::4], z2, 1).flags.c_contiguous
def test_passthrough_arguments(doc):
assert doc(m.vec_passthrough) == (
"vec_passthrough(" + ", ".join([
"arg0: float",
"arg1: numpy.ndarray[float64]",
"arg2: numpy.ndarray[float64]",
"arg3: numpy.ndarray[int32]",
"arg4: int",
"arg5: m.numpy_vectorize.NonPODClass",
"arg6: numpy.ndarray[float64]"]) + ") -> object")
b = np.array([[10, 20, 30]], dtype='float64')
c = np.array([100, 200]) # NOT a vectorized argument
d = np.array([[1000], [2000], [3000]], dtype='int')
g = np.array([[1000000, 2000000, 3000000]], dtype='int') # requires casting
assert np.all(
m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) ==
np.array([[1111111, 2111121, 3111131],
[1112111, 2112121, 3112131],
[1113111, 2113121, 3113131]]))
def test_method_vectorization():
o = m.VectorizeTestClass(3)
x = np.array([1, 2], dtype='int')
y = np.array([[10], [20]], dtype='float32')
assert np.all(o.method(x, y) == [[14, 15], [24, 25]])
def test_array_collapse():
assert not isinstance(m.vectorized_func(1, 2, 3), np.ndarray)
assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray)
z = m.vectorized_func([1], 2, 3)
assert isinstance(z, np.ndarray)
assert z.shape == (1, )
z = m.vectorized_func(1, [[[2]]], 3)
assert isinstance(z, np.ndarray)
assert z.shape == (1, 1, 1)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_numpy_vectorize.py | test_numpy_vectorize.py |
import pytest
from pybind11_tests import enums as m
def test_unscoped_enum():
assert str(m.UnscopedEnum.EOne) == "UnscopedEnum.EOne"
assert str(m.UnscopedEnum.ETwo) == "UnscopedEnum.ETwo"
assert str(m.EOne) == "UnscopedEnum.EOne"
# name property
assert m.UnscopedEnum.EOne.name == "EOne"
assert m.UnscopedEnum.ETwo.name == "ETwo"
assert m.EOne.name == "EOne"
# name readonly
with pytest.raises(AttributeError):
m.UnscopedEnum.EOne.name = ""
# name returns a copy
foo = m.UnscopedEnum.EOne.name
foo = "bar"
assert m.UnscopedEnum.EOne.name == "EOne"
# __members__ property
assert m.UnscopedEnum.__members__ == \
{"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
# __members__ readonly
with pytest.raises(AttributeError):
m.UnscopedEnum.__members__ = {}
# __members__ returns a copy
foo = m.UnscopedEnum.__members__
foo["bar"] = "baz"
assert m.UnscopedEnum.__members__ == \
{"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
for docstring_line in '''An unscoped enumeration
Members:
EOne : Docstring for EOne
ETwo : Docstring for ETwo
EThree : Docstring for EThree'''.split('\n'):
assert docstring_line in m.UnscopedEnum.__doc__
# Unscoped enums will accept ==/!= int comparisons
y = m.UnscopedEnum.ETwo
assert y == 2
assert 2 == y
assert y != 3
assert 3 != y
# Compare with None
assert (y != None) # noqa: E711
assert not (y == None) # noqa: E711
# Compare with an object
assert (y != object())
assert not (y == object())
# Compare with string
assert y != "2"
assert "2" != y
assert not ("2" == y)
assert not (y == "2")
with pytest.raises(TypeError):
y < object()
with pytest.raises(TypeError):
y <= object()
with pytest.raises(TypeError):
y > object()
with pytest.raises(TypeError):
y >= object()
with pytest.raises(TypeError):
y | object()
with pytest.raises(TypeError):
y & object()
with pytest.raises(TypeError):
y ^ object()
assert int(m.UnscopedEnum.ETwo) == 2
assert str(m.UnscopedEnum(2)) == "UnscopedEnum.ETwo"
# order
assert m.UnscopedEnum.EOne < m.UnscopedEnum.ETwo
assert m.UnscopedEnum.EOne < 2
assert m.UnscopedEnum.ETwo > m.UnscopedEnum.EOne
assert m.UnscopedEnum.ETwo > 1
assert m.UnscopedEnum.ETwo <= 2
assert m.UnscopedEnum.ETwo >= 2
assert m.UnscopedEnum.EOne <= m.UnscopedEnum.ETwo
assert m.UnscopedEnum.EOne <= 2
assert m.UnscopedEnum.ETwo >= m.UnscopedEnum.EOne
assert m.UnscopedEnum.ETwo >= 1
assert not (m.UnscopedEnum.ETwo < m.UnscopedEnum.EOne)
assert not (2 < m.UnscopedEnum.EOne)
# arithmetic
assert m.UnscopedEnum.EOne & m.UnscopedEnum.EThree == m.UnscopedEnum.EOne
assert m.UnscopedEnum.EOne | m.UnscopedEnum.ETwo == m.UnscopedEnum.EThree
assert m.UnscopedEnum.EOne ^ m.UnscopedEnum.EThree == m.UnscopedEnum.ETwo
def test_scoped_enum():
assert m.test_scoped_enum(m.ScopedEnum.Three) == "ScopedEnum::Three"
z = m.ScopedEnum.Two
assert m.test_scoped_enum(z) == "ScopedEnum::Two"
# Scoped enums will *NOT* accept ==/!= int comparisons (Will always return False)
assert not z == 3
assert not 3 == z
assert z != 3
assert 3 != z
# Compare with None
assert (z != None) # noqa: E711
assert not (z == None) # noqa: E711
# Compare with an object
assert (z != object())
assert not (z == object())
# Scoped enums will *NOT* accept >, <, >= and <= int comparisons (Will throw exceptions)
with pytest.raises(TypeError):
z > 3
with pytest.raises(TypeError):
z < 3
with pytest.raises(TypeError):
z >= 3
with pytest.raises(TypeError):
z <= 3
# order
assert m.ScopedEnum.Two < m.ScopedEnum.Three
assert m.ScopedEnum.Three > m.ScopedEnum.Two
assert m.ScopedEnum.Two <= m.ScopedEnum.Three
assert m.ScopedEnum.Two <= m.ScopedEnum.Two
assert m.ScopedEnum.Two >= m.ScopedEnum.Two
assert m.ScopedEnum.Three >= m.ScopedEnum.Two
def test_implicit_conversion():
assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "EMode.EFirstMode"
assert str(m.ClassWithUnscopedEnum.EFirstMode) == "EMode.EFirstMode"
f = m.ClassWithUnscopedEnum.test_function
first = m.ClassWithUnscopedEnum.EFirstMode
second = m.ClassWithUnscopedEnum.ESecondMode
assert f(first) == 1
assert f(first) == f(first)
assert not f(first) != f(first)
assert f(first) != f(second)
assert not f(first) == f(second)
assert f(first) == int(f(first))
assert not f(first) != int(f(first))
assert f(first) != int(f(second))
assert not f(first) == int(f(second))
# noinspection PyDictCreation
x = {f(first): 1, f(second): 2}
x[f(first)] = 3
x[f(second)] = 4
# Hashing test
assert str(x) == "{EMode.EFirstMode: 3, EMode.ESecondMode: 4}"
def test_binary_operators():
assert int(m.Flags.Read) == 4
assert int(m.Flags.Write) == 2
assert int(m.Flags.Execute) == 1
assert int(m.Flags.Read | m.Flags.Write | m.Flags.Execute) == 7
assert int(m.Flags.Read | m.Flags.Write) == 6
assert int(m.Flags.Read | m.Flags.Execute) == 5
assert int(m.Flags.Write | m.Flags.Execute) == 3
assert int(m.Flags.Write | 1) == 3
assert ~m.Flags.Write == -3
state = m.Flags.Read | m.Flags.Write
assert (state & m.Flags.Read) != 0
assert (state & m.Flags.Write) != 0
assert (state & m.Flags.Execute) == 0
assert (state & 1) == 0
state2 = ~state
assert state2 == -7
assert int(state ^ state2) == -1
def test_enum_to_int():
m.test_enum_to_int(m.Flags.Read)
m.test_enum_to_int(m.ClassWithUnscopedEnum.EMode.EFirstMode)
m.test_enum_to_uint(m.Flags.Read)
m.test_enum_to_uint(m.ClassWithUnscopedEnum.EMode.EFirstMode)
m.test_enum_to_long_long(m.Flags.Read)
m.test_enum_to_long_long(m.ClassWithUnscopedEnum.EMode.EFirstMode)
def test_duplicate_enum_name():
with pytest.raises(ValueError) as excinfo:
m.register_bad_enum()
assert str(excinfo.value) == 'SimpleEnum: element "ONE" already exists!'
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_enum.py | test_enum.py |
from pybind11_tests import chrono as m
import datetime
def test_chrono_system_clock():
# Get the time from both c++ and datetime
date1 = m.test_chrono1()
date2 = datetime.datetime.today()
# The returned value should be a datetime
assert isinstance(date1, datetime.datetime)
# The numbers should vary by a very small amount (time it took to execute)
diff = abs(date1 - date2)
# There should never be a days/seconds difference
assert diff.days == 0
assert diff.seconds == 0
# We test that no more than about 0.5 seconds passes here
# This makes sure that the dates created are very close to the same
# but if the testing system is incredibly overloaded this should still pass
assert diff.microseconds < 500000
def test_chrono_system_clock_roundtrip():
date1 = datetime.datetime.today()
# Roundtrip the time
date2 = m.test_chrono2(date1)
# The returned value should be a datetime
assert isinstance(date2, datetime.datetime)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
def test_chrono_system_clock_roundtrip_date():
date1 = datetime.date.today()
# Roundtrip the time
datetime2 = m.test_chrono2(date1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
# Year, Month & Day should be the same after the round trip
assert date1.year == date2.year
assert date1.month == date2.month
assert date1.day == date2.day
# There should be no time information
assert time2.hour == 0
assert time2.minute == 0
assert time2.second == 0
assert time2.microsecond == 0
def test_chrono_system_clock_roundtrip_time():
time1 = datetime.datetime.today().time()
# Roundtrip the time
datetime2 = m.test_chrono2(time1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# Hour, Minute, Second & Microsecond should be the same after the round trip
assert time1.hour == time2.hour
assert time1.minute == time2.minute
assert time1.second == time2.second
assert time1.microsecond == time2.microsecond
# There should be no date information (i.e. date = python base date)
assert date2.year == 1970
assert date2.month == 1
assert date2.day == 1
def test_chrono_duration_roundtrip():
# Get the difference between two times (a timedelta)
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
# Make sure this is a timedelta
assert isinstance(diff, datetime.timedelta)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_duration_subtraction_equivalence():
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_duration_subtraction_equivalence_date():
date1 = datetime.date.today()
date2 = datetime.date.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff.days == diff.days
assert cpp_diff.seconds == diff.seconds
assert cpp_diff.microseconds == diff.microseconds
def test_chrono_steady_clock():
time1 = m.test_chrono5()
assert isinstance(time1, datetime.timedelta)
def test_chrono_steady_clock_roundtrip():
time1 = datetime.timedelta(days=10, seconds=10, microseconds=100)
time2 = m.test_chrono6(time1)
assert isinstance(time2, datetime.timedelta)
# They should be identical (no information lost on roundtrip)
assert time1.days == time2.days
assert time1.seconds == time2.seconds
assert time1.microseconds == time2.microseconds
def test_floating_point_duration():
# Test using a floating point number in seconds
time = m.test_chrono7(35.525123)
assert isinstance(time, datetime.timedelta)
assert time.seconds == 35
assert 525122 <= time.microseconds <= 525123
diff = m.test_chrono_float_diff(43.789012, 1.123456)
assert diff.seconds == 42
assert 665556 <= diff.microseconds <= 665557
def test_nano_timepoint():
time = datetime.datetime.now()
time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60))
assert(time1 == time + datetime.timedelta(seconds=60))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_chrono.py | test_chrono.py |
from widget_module import Widget
class DerivedWidget(Widget):
def __init__(self, message):
super(DerivedWidget, self).__init__(message)
def the_answer(self):
return 42
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_embed/test_interpreter.py | test_interpreter.py |
import sys
import test_cmake_build
assert test_cmake_build.add(1, 2) == 3
print("{} imports, runs, and adds: 1 + 2 = 3".format(sys.argv[1]))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tests/test_cmake_build/test.py | test.py |
from __future__ import print_function
import argparse
import sys
import sysconfig
from . import get_include
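# Print compiler -I flags for the Python headers (the sysconfig 'include' and
# 'platinclude' paths) and the pybind11 headers, de-duplicated while
# preserving order.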
def print_includes():
dirs = [sysconfig.get_path('include'),
sysconfig.get_path('platinclude'),
get_include()]
# Make unique but preserve order
unique_dirs = []
for d in dirs:
if d not in unique_dirs:
unique_dirs.append(d)
print(' '.join('-I' + d for d in unique_dirs))
def main():
parser = argparse.ArgumentParser(prog='python -m pybind11')
parser.add_argument('--includes', action='store_true',
help='Include flags for both pybind11 and Python headers.')
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
if args.includes:
print_includes()
if __name__ == '__main__':
main()
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/pybind11/__main__.py | __main__.py |
version_info = (2, 5, 0)
__version__ = '.'.join(map(str, version_info))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/pybind11/_version.py | _version.py |
from ._version import version_info, __version__ # noqa: F401 imported but unused
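# Return the directory containing the pybind11 headers: the bundled "include"
# directory when installed as a package, otherwise the sibling include/
# directory of a source checkout. The `user` argument is not used here.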
def get_include(user=False):
import os
d = os.path.dirname(__file__)
if os.path.exists(os.path.join(d, "include")):
# Package is installed
return os.path.join(d, "include")
else:
# Package is from a source directory
return os.path.join(os.path.dirname(d), "include")
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/pybind11/__init__.py | __init__.py |
from __future__ import print_function, division
import os
import sys
# Internal build script for reporting the size of a generated test .so file (for debugging).
# Usage:
# python libsize.py file.so save.txt -- displays the size of file.so and, if save.txt exists, compares it to the
# size in it, then overwrites save.txt with the new size for future runs.
if len(sys.argv) != 3:
sys.exit("Invalid arguments: usage: python libsize.py file.so save.txt")
lib = sys.argv[1]
save = sys.argv[2]
if not os.path.exists(lib):
sys.exit("Error: requested file ({}) does not exist".format(lib))
libsize = os.path.getsize(lib)
print("------", os.path.basename(lib), "file size:", libsize, end='')
if os.path.exists(save):
with open(save) as sf:
oldsize = int(sf.readline())
if oldsize > 0:
change = libsize - oldsize
if change == 0:
print(" (no change)")
else:
print(" (change of {:+} bytes = {:+.2%})".format(change, change / oldsize))
else:
print()
with open(save, 'w') as sf:
sf.write(str(libsize))
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tools/libsize.py | libsize.py |
#!/usr/bin/env python3
#
# Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]
#
# Extract documentation from C++ header files to use it in Python bindings
#
import os
import sys
import platform
import re
import textwrap
from clang import cindex
from clang.cindex import CursorKind
from collections import OrderedDict
from glob import glob
from threading import Thread, Semaphore
from multiprocessing import cpu_count
RECURSE_LIST = [
CursorKind.TRANSLATION_UNIT,
CursorKind.NAMESPACE,
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.CLASS_TEMPLATE
]
PRINT_LIST = [
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.ENUM_CONSTANT_DECL,
CursorKind.CLASS_TEMPLATE,
CursorKind.FUNCTION_DECL,
CursorKind.FUNCTION_TEMPLATE,
CursorKind.CONVERSION_FUNCTION,
CursorKind.CXX_METHOD,
CursorKind.CONSTRUCTOR,
CursorKind.FIELD_DECL
]
PREFIX_BLACKLIST = [
CursorKind.TRANSLATION_UNIT
]
CPP_OPERATORS = {
'<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',
'+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':
'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',
'>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':
'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',
'&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':
'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'
}
CPP_OPERATORS = OrderedDict(
sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))
job_count = cpu_count()
job_semaphore = Semaphore(job_count)
class NoFilenamesError(ValueError):
pass
def d(s):
return s if isinstance(s, str) else s.decode('utf8')
def sanitize_name(name):
name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
for k, v in CPP_OPERATORS.items():
name = name.replace('operator%s' % k, 'operator_%s' % v)
name = re.sub('<.*>', '', name)
name = ''.join([ch if ch.isalnum() else '_' for ch in name])
name = re.sub('_$', '', re.sub('_+', '_', name))
return '__doc_' + name
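# Turn a raw C++ comment block (/* ... */ or /// ...) into plain docstring
# text: strip the comment markers and common leading whitespace, translate
# Doxygen tags (\param, \return, ...) and HTML/TeX tags into lightweight
# markup, and re-flow paragraphs to roughly 70 columns while leaving
# ``` code segments untouched.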
def process_comment(comment):
result = ''
# Remove C++ comment syntax
leading_spaces = float('inf')
for s in comment.expandtabs(tabsize=4).splitlines():
s = s.strip()
if s.startswith('/*'):
s = s[2:].lstrip('*')
elif s.endswith('*/'):
s = s[:-2].rstrip('*')
elif s.startswith('///'):
s = s[3:]
if s.startswith('*'):
s = s[1:]
if len(s) > 0:
leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
result += s + '\n'
if leading_spaces != float('inf'):
result2 = ""
for s in result.splitlines():
result2 += s[leading_spaces:] + '\n'
result = result2
# Doxygen tags
cpp_group = r'([\w:]+)'
param_group = r'([\[\w:\]]+)'
s = result
s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
r'\n\n$Parameter ``\2``:\n\n', s)
s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
r'\n\n$Template parameter ``\2``:\n\n', s)
for in_, out_ in {
'return': 'Returns',
'author': 'Author',
'authors': 'Authors',
'copyright': 'Copyright',
'date': 'Date',
'remark': 'Remark',
'sa': 'See also',
'see': 'See also',
'extends': 'Extends',
'throw': 'Throws',
'throws': 'Throws'
}.items():
s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)
s = re.sub(r'\\details\s*', r'\n\n', s)
s = re.sub(r'\\brief\s*', r'', s)
s = re.sub(r'\\short\s*', r'', s)
s = re.sub(r'\\ref\s*', r'', s)
s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
r"```\n\1\n```\n", s, flags=re.DOTALL)
# HTML/TeX tags
s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
s = re.sub(r'<li>', r'\n\n* ', s)
s = re.sub(r'</?ul>', r'', s)
s = re.sub(r'</li>', r'\n\n', s)
s = s.replace('``true``', '``True``')
s = s.replace('``false``', '``False``')
# Re-flow text
wrapper = textwrap.TextWrapper()
wrapper.expand_tabs = True
wrapper.replace_whitespace = True
wrapper.drop_whitespace = True
wrapper.width = 70
wrapper.initial_indent = wrapper.subsequent_indent = ''
result = ''
in_code_segment = False
for x in re.split(r'(```)', s):
if x == '```':
if not in_code_segment:
result += '```\n'
else:
result += '\n```\n\n'
in_code_segment = not in_code_segment
elif in_code_segment:
result += x.strip()
else:
for y in re.split(r'(?: *\n *){2,}', x):
wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
if len(wrapped) > 0 and wrapped[0] == '$':
result += wrapped[1:] + '\n'
wrapper.initial_indent = \
wrapper.subsequent_indent = ' ' * 4
else:
if len(wrapped) > 0:
result += wrapped + '\n\n'
wrapper.initial_indent = wrapper.subsequent_indent = ''
return result.rstrip().lstrip('\n')
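# Recursively walk the clang AST below `node`, descending only into the cursor
# kinds in RECURSE_LIST, and append a (sanitized name, filename, processed
# comment) tuple to `output` for every declaration kind in PRINT_LIST that
# originates from `filename`.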
def extract(filename, node, prefix, output):
if not (node.location.file is None or
os.path.samefile(d(node.location.file.name), filename)):
return 0
if node.kind in RECURSE_LIST:
sub_prefix = prefix
if node.kind not in PREFIX_BLACKLIST:
if len(sub_prefix) > 0:
sub_prefix += '_'
sub_prefix += d(node.spelling)
for i in node.get_children():
extract(filename, i, sub_prefix, output)
if node.kind in PRINT_LIST:
comment = d(node.raw_comment) if node.raw_comment is not None else ''
comment = process_comment(comment)
sub_prefix = prefix
if len(sub_prefix) > 0:
sub_prefix += '_'
if len(node.spelling) > 0:
name = sanitize_name(sub_prefix + d(node.spelling))
output.append((name, filename, comment))
class ExtractionThread(Thread):
def __init__(self, filename, parameters, output):
Thread.__init__(self)
self.filename = filename
self.parameters = parameters
self.output = output
job_semaphore.acquire()
def run(self):
print('Processing "%s" ..' % self.filename, file=sys.stderr)
try:
index = cindex.Index(
cindex.conf.lib.clang_createIndex(False, True))
tu = index.parse(self.filename, self.parameters)
extract(self.filename, tu.cursor, '', self.output)
finally:
job_semaphore.release()
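# Split the raw argument list into (clang parameters, header filenames):
# add default '-x c++' and '-std=c++11' flags when absent, plus
# platform-specific include paths (the Xcode SDK sysroot on macOS, the newest
# /usr/lib*/clang/*/include directory on Linux).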
def read_args(args):
parameters = []
filenames = []
if "-x" not in args:
parameters.extend(['-x', 'c++'])
if not any(it.startswith("-std=") for it in args):
parameters.append('-std=c++11')
if platform.system() == 'Darwin':
dev_path = '/Applications/Xcode.app/Contents/Developer/'
lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'
sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'
libclang = lib_dir + 'libclang.dylib'
if os.path.exists(libclang):
cindex.Config.set_library_path(os.path.dirname(libclang))
if os.path.exists(sdk_dir):
sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])
parameters.append('-isysroot')
parameters.append(sysroot_dir)
elif platform.system() == 'Linux':
# clang doesn't find its own base includes by default on Linux,
# but different distros install them in different paths.
# Try to autodetect, preferring the highest numbered version.
def clang_folder_version(d):
return [int(ver) for ver in re.findall(r'(?<!lib)(?<!\d)\d+', d)]
clang_include_dir = max((
path
for libdir in ['lib64', 'lib', 'lib32']
for path in glob('/usr/%s/clang/*/include' % libdir)
if os.path.isdir(path)
), default=None, key=clang_folder_version)
if clang_include_dir:
parameters.extend(['-isystem', clang_include_dir])
for item in args:
if item.startswith('-'):
parameters.append(item)
else:
filenames.append(item)
if len(filenames) == 0:
raise NoFilenamesError("args parameter did not contain any filenames")
return parameters, filenames
def extract_all(args):
parameters, filenames = read_args(args)
output = []
for filename in filenames:
thr = ExtractionThread(filename, parameters, output)
thr.start()
print('Waiting for jobs to finish ..', file=sys.stderr)
for i in range(job_count):
job_semaphore.acquire()
return output
def write_header(comments, out_file=sys.stdout):
print('''/*
This file contains docstrings for the Python bindings.
Do not edit! These were automatically extracted by mkdoc.py
*/
#define __EXPAND(x) x
#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
#define __CAT1(a, b) a ## b
#define __CAT2(a, b) __CAT1(a, b)
#define __DOC1(n1) __doc_##n1
#define __DOC2(n1, n2) __doc_##n1##_##n2
#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
#define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
#if defined(__GNUG__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
''', file=out_file)
name_ctr = 1
name_prev = None
for name, _, comment in list(sorted(comments, key=lambda x: (x[0], x[1]))):
if name == name_prev:
name_ctr += 1
name = name + "_%i" % name_ctr
else:
name_prev = name
name_ctr = 1
print('\nstatic const char *%s =%sR"doc(%s)doc";' %
(name, '\n' if '\n' in comment else ' ', comment), file=out_file)
print('''
#if defined(__GNUG__)
#pragma GCC diagnostic pop
#endif
''', file=out_file)
def mkdoc(args):
args = list(args)
out_path = None
for idx, arg in enumerate(args):
if arg.startswith("-o"):
args.remove(arg)
try:
out_path = arg[2:] or args.pop(idx)
except IndexError:
print("-o flag requires an argument")
exit(-1)
break
comments = extract_all(args)
if out_path:
try:
with open(out_path, 'w') as out_file:
write_header(comments, out_file)
except:
# In the event of an error, don't leave a partially-written
# output file.
try:
os.unlink(out_path)
except:
pass
raise
else:
write_header(comments)
if __name__ == '__main__':
try:
mkdoc(sys.argv[1:])
except NoFilenamesError:
print('Syntax: %s [.. a list of header files ..]' % sys.argv[0])
exit(-1)
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tools/mkdoc.py | mkdoc.py |
#!/bin/bash
#
# Script to check include/test code for common pybind11 code style errors.
#
# This script currently checks for
#
# 1. use of tabs instead of spaces
# 2. MSDOS-style CRLF endings
# 3. trailing spaces
# 4. missing space between keyword and parenthesis, e.g.: for(, if(, while(
# 5. Missing space between right parenthesis and brace, e.g. 'for (...){'
# 6. opening brace on its own line. It should always be on the same line as the
# if/while/for/do statement.
#
# Invoke as: tools/check-style.sh
#
check_style_errors=0
IFS=$'\n'
found="$( GREP_COLORS='mt=41' GREP_COLOR='41' grep $'\t' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )"
if [ -n "$found" ]; then
# The mt=41 sets a red background for matched tabs:
echo -e '\033[31;01mError: found tab characters in the following files:\033[0m'
check_style_errors=1
echo "$found" | sed -e 's/^/ /'
fi
found="$( grep -IUlr $'\r' include tests/*.{cpp,py,h} docs/*.rst --color=always )"
if [ -n "$found" ]; then
echo -e '\033[31;01mError: found CRLF characters in the following files:\033[0m'
check_style_errors=1
echo "$found" | sed -e 's/^/ /'
fi
found="$(GREP_COLORS='mt=41' GREP_COLOR='41' grep '[[:blank:]]\+$' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )"
if [ -n "$found" ]; then
# The mt=41 sets a red background for matched trailing spaces
echo -e '\033[31;01mError: found trailing spaces in the following files:\033[0m'
check_style_errors=1
echo "$found" | sed -e 's/^/ /'
fi
found="$(grep '\<\(if\|for\|while\|catch\)(\|){' include tests/*.{cpp,h} -rn --color=always)"
if [ -n "$found" ]; then
echo -e '\033[31;01mError: found the following coding style problems:\033[0m'
check_style_errors=1
echo "$found" | sed -e 's/^/ /'
fi
found="$(awk '
function prefix(filename, lineno) {
return " \033[35m" filename "\033[36m:\033[32m" lineno "\033[36m:\033[0m"
}
function mark(pattern, string) { sub(pattern, "\033[01;31m&\033[0m", string); return string }
last && /^\s*{/ {
print prefix(FILENAME, FNR-1) mark("\\)\\s*$", last)
print prefix(FILENAME, FNR) mark("^\\s*{", $0)
last=""
}
{ last = /(if|for|while|catch|switch)\s*\(.*\)\s*$/ ? $0 : "" }
' $(find include -type f) tests/*.{cpp,h} docs/*.rst)"
if [ -n "$found" ]; then
check_style_errors=1
echo -e '\033[31;01mError: braces should occur on the same line as the if/while/.. statement. Found issues in the following files:\033[0m'
echo "$found"
fi
exit $check_style_errors
| ACTIONet | /ACTIONet-0.1.1.tar.gz/ACTIONet-0.1.1/pybind11/tools/check-style.sh | check-style.sh |
import re
import subprocess
import time
from pathlib import Path
import pyperclip
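# Clipboard watcher for AWS credentials (summary of the code below): poll the
# clipboard every 0.5 seconds; when the copied text matches an account header
# such as "[123456789012_role]" followed by aws_access_key_id,
# aws_secret_access_key and aws_session_token lines, update (or append) that
# profile in ~/.aws/credentials and show a desktop notification via
# notify-send when available.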
schema = (
r"(aws_access_key_id = )[^\n]+\n"
r"(aws_secret_access_key = )[^\n]+\n"
r"(aws_session_token = )[^\n]+"
)
def main():
recent_value = pyperclip.paste()
while True:
tmp_value = pyperclip.paste()
if tmp_value != recent_value:
update_credentials(Path.home() / ".aws" / "credentials", tmp_value)
recent_value = tmp_value
time.sleep(0.5)
def update_credentials(credentials_file: Path, new_credentials: str):
new_credentials_match = re.fullmatch(
re.compile(r"(?P<account>\[\d{12}_\w+\])\n%s" % schema), new_credentials
)
if new_credentials_match:
try:
with open(credentials_file.as_posix(), "r") as f:
file_content = f.read()
except FileNotFoundError:
append_to_file(credentials_file, new_credentials)
return
old_credentials_match = re.search(
re.compile(
r"(%s)\n%s" % (re.escape(new_credentials_match["account"]), schema)
),
file_content,
)
if old_credentials_match:
write_to_file(
credentials_file,
new_credentials,
old_credentials_match[0],
file_content,
)
else:
append_to_file(credentials_file, new_credentials)
def write_to_file(
credentials_file: Path,
new_credentials: str,
old_credentials: str,
file_content: str,
):
with open(credentials_file.as_posix(), "w") as f:
f.write(file_content.replace(old_credentials, new_credentials))
display_notification("Existing credentials updated.")
def append_to_file(credentials_file: Path, credentials: str):
with open(credentials_file.as_posix(), "a") as f:
f.write(f"\n{credentials}\n")
display_notification("New credentials added.")
def display_notification(message: str):
try:
subprocess.run(["notify-send", "ACU", message])
except FileNotFoundError:
# notify-send may not be installed
pass
| ACU | /ACU-0.2.1-py3-none-any.whl/watcher/watcher.py | watcher.py |
A command line tool to run your code against sample test cases. Without leaving the terminal :)
Supported sites
^^^^^^^^^^^^^^^
- Codeforces
- Codechef
- Spoj
- Hackerrank
- Atcoder
Supported languages
^^^^^^^^^^^^^^^^^^^
- C
- C++
- Python
- Java
- Ruby
- Haskell
Installation
^^^^^^^^^^^^
Build from source
'''''''''''''''''
- ``git clone https://github.com/coderick14/ACedIt``
- ``cd ACedIt``
- ``python setup.py install``
As a Python package
'''''''''''''''''''
::
pip install --user ACedIt
Usage
^^^^^
::
usage: acedit [-h] [-s {codeforces,codechef,hackerrank,spoj}] [-c CONTEST]
[-p PROBLEM] [-f] [--run SOURCE_FILE]
[--set-default-site {codeforces,codechef,hackerrank,spoj}]
[--set-default-contest DEFAULT_CONTEST]
optional arguments:
-h, --help show this help message and exit
-s {codeforces,codechef,hackerrank,spoj}, --site {codeforces,codechef,hackerrank,spoj}
The competitive programming platform, e.g. codeforces,
codechef etc
-c CONTEST, --contest CONTEST
The name of the contest, e.g. JUNE17, LTIME49, COOK83
etc
-p PROBLEM, --problem PROBLEM
The problem code, e.g. OAK, PRMQ etc
-f, --force Force download the test cases, even if they are cached
--run SOURCE_FILE Name of source file to be run
--set-default-site {codeforces,codechef,hackerrank,spoj}
Name of default site to be used when -s flag is not
specified
--set-default-contest DEFAULT_CONTEST
Name of default contest to be used when -c flag is not
specified
--clear-cache Clear cached test cases for a given site. Takes
default site if -s flag is omitted
During installation, the default site is set to ``codeforces``. You can change it at any time using the flags mentioned above.
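For example, to make ``codechef`` the default site and ``AUG17`` the default contest (using the flags listed above):
::
    acedit --set-default-site codechef
    acedit --set-default-contest AUG17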
Examples
^^^^^^^^
- Fetch test cases for a single problem
::
acedit -s codechef -c AUG17 -p CHEFFA
- Fetch test cases for all problems in a contest
::
acedit -s codechef -c AUG17
- Force download test cases, even when they are cached
::
acedit -s codeforces -c 86 -p D -f
- Test your code (when the default site and default contest are set and the filename is the same as the problem code)
::
acedit --run D.cpp
::
acedit --run CHEFFA.py
**Since your filename is the same as the problem code, there's no need for the -p flag.**
- Test your code (specifying contest and problem codes explicitly)
::
acedit --run solve.cpp -c 835 -p D
::
acedit --run test.py -s codechef -c AUG17 -p CHEFFA
Note :
''''''
- The working directory structure mentioned in the previous versions is no longer required or supported.
- There might be some issues with Spoj, as they have widely varying DOM trees for different problems. Feel free to contribute on this. Or anything else that you can come up with :)
| ACedIt | /ACedIt-1.2.1.tar.gz/ACedIt-1.2.1/README.rst | README.rst |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
from acedit.install_entry import InstallEntry
with open('requirements.txt', 'r') as f:
requirements = [line.strip() for line in f.readlines()]
with open('README.rst', 'rb') as f:
long_description = f.read().decode('utf-8')
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name='ACedIt',
packages=['acedit'],
version='1.2.1',
description='Download and test against sample test cases from any competitive programming website',
long_description=long_description,
author='Deep Bhattacharyya',
author_email='bhattacharyya.rick14@gmail.com',
python_requires='>=3.5',
install_requires=requirements,
entry_points={
'console_scripts': ['acedit=acedit.main:main']
},
zip_safe=False,
url='https://github.com/coderick14/ACedIt',
keywords=['sample test cases', 'downloader', 'competitive programming'],
cmdclass={
'install': InstallEntry,
'develop': InstallEntry,
},
classifiers=[
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Topic :: Education',
],
license='MIT License'
)
| ACedIt | /ACedIt-1.2.1.tar.gz/ACedIt-1.2.1/setup.py | setup.py |
# coding: utf-8
import sys
import json
import re
import os
import functools
import platform
import threading
try:
from bs4 import BeautifulSoup as bs
import requests as rq
from argparse import ArgumentParser
except:
err = """
You haven't installed the required dependencies.
Run 'python setup.py install' to install the dependencies.
"""
print(err)
sys.exit(0)
class Utilities:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'ACedIt')
colors = {
'GREEN': '\033[92m',
'YELLOW': '\033[93m',
'RED': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
}
@staticmethod
def parse_flags(supported_sites):
"""
Utility function to parse command line flags
"""
parser = ArgumentParser()
parser.add_argument('-s', '--site',
dest='site',
choices=supported_sites,
help='The competitive programming platform, e.g. codeforces, codechef etc')
parser.add_argument('-c', '--contest',
dest='contest',
help='The name of the contest, e.g. JUNE17, LTIME49, COOK83 etc')
parser.add_argument('-p', '--problem',
dest='problem',
help='The problem code, e.g. OAK, PRMQ etc')
parser.add_argument('-f', '--force',
dest='force',
action='store_true',
help='Force download the test cases, even if they are cached')
parser.add_argument('--run',
dest='source_file',
help='Name of source file to be run')
parser.add_argument('--set-default-site',
dest='default_site',
choices=supported_sites,
help='Name of default site to be used when -s flag is not specified')
parser.add_argument('--set-default-contest',
dest='default_contest',
help='Name of default contest to be used when -c flag is not specified')
parser.add_argument('--clear-cache',
dest='clear_cache',
action='store_true',
help='Clear cached test cases for a given site. Takes default site if -s flag is omitted')
parser.set_defaults(force=False, clear_cache=False)
args = parser.parse_args()
flags = {}
if args.site is None or args.contest is None:
import json
site, contest = None, None
try:
with open(os.path.join(Utilities.cache_dir, 'constants.json'), 'r') as f:
data = f.read()
data = json.loads(data)
site = data.get(
'default_site', None) if args.site is None else args.site
contest = data.get(
'default_contest', None) if args.contest is None else args.contest
except:
pass
flags['site'] = site
flags['contest'] = contest if not site == 'spoj' else None
else:
flags['site'] = args.site
flags['contest'] = args.contest
flags['problem'] = args.problem
flags['force'] = args.force
flags['clear_cache'] = args.clear_cache
flags['source'] = args.source_file
flags['default_site'] = args.default_site
flags['default_contest'] = args.default_contest
return flags
@staticmethod
def set_constants(key, value):
"""
Utility method to set default site and contest
"""
with open(os.path.join(Utilities.cache_dir, 'constants.json'), 'r+') as f:
data = f.read()
data = json.loads(data)
data[key] = value
f.seek(0)
f.write(json.dumps(data, indent=2))
f.truncate()
print('Set %s to %s' % (key, value))
@staticmethod
def check_cache(site, contest, problem):
"""
Method to check if the test cases already exist in cache
If not, create the directory structure to store test cases
"""
if problem is None:
if not os.path.isdir(os.path.join(Utilities.cache_dir, site, contest)):
os.makedirs(os.path.join(Utilities.cache_dir, site, contest))
return False
# Handle case for SPOJ specially as it does not have contests
contest = '' if site == 'spoj' else contest
if os.path.isdir(os.path.join(Utilities.cache_dir, site, contest, problem)):
return True
else:
os.makedirs(os.path.join(Utilities.cache_dir, site,
contest, problem))
return False
@staticmethod
def clear_cache(site):
"""
Method to clear cached test cases
"""
confirm = input(
'Remove entire cache for site %s? (y/N) : ' % (site))
if confirm == 'y':
from shutil import rmtree
try:
rmtree(os.path.join(Utilities.cache_dir, site))
except:
print('Some error occured. Try again.')
return
os.makedirs(os.path.join(Utilities.cache_dir, site))
print('Done.')
@staticmethod
def store_files(site, contest, problem, inputs, outputs):
"""
Method to store the test cases in files
"""
# Handle case for SPOJ specially as it does not have contests
contest = '' if site == 'spoj' else contest
for i, inp in enumerate(inputs):
filename = os.path.join(
Utilities.cache_dir, site, contest, problem, 'Input' + str(i))
with open(filename, 'w') as handler:
handler.write(inp)
for i, out in enumerate(outputs):
filename = os.path.join(
Utilities.cache_dir, site, contest, problem, 'Output' + str(i))
with open(filename, 'w') as handler:
handler.write(out)
@staticmethod
def download_problem_testcases(args):
"""
Download test cases for a given problem
"""
if args['site'] == 'codeforces':
platform = Codeforces(args)
elif args['site'] == 'codechef':
platform = Codechef(args)
elif args['site'] == 'spoj':
platform = Spoj(args)
elif args['site'] == 'atcoder':
platform = AtCoder(args)
else:
platform = Hackerrank(args)
is_in_cache = Utilities.check_cache(
platform.site, platform.contest, platform.problem)
if not args['force'] and is_in_cache:
print('Test cases found in cache...')
sys.exit(0)
platform.scrape_problem()
@staticmethod
def download_contest_testcases(args):
"""
Download test cases for all problems in a given contest
"""
if args['site'] == 'codeforces':
platform = Codeforces(args)
elif args['site'] == 'codechef':
platform = Codechef(args)
elif args['site'] == 'hackerrank':
platform = Hackerrank(args)
elif args['site'] == 'atcoder':
platform = AtCoder(args)
Utilities.check_cache(
platform.site, platform.contest, platform.problem)
platform.scrape_contest()
@staticmethod
def input_file_to_string(path, num_cases):
"""
Method to return sample inputs as a list
"""
inputs = []
for i in range(num_cases):
with open(os.path.join(path, 'Input' + str(i)), 'r') as fh:
inputs += [fh.read()]
return inputs
@staticmethod
def cleanup(num_cases, basename, extension):
"""
Method to clean up temporarily created files
"""
for i in range(num_cases):
if os.path.isfile('temp_output' + str(i)):
os.remove('temp_output' + str(i))
if extension == 'java':
os.system('rm ' + basename + '*.class')
if extension == 'cpp':
os.system('rm ' + basename)
@staticmethod
def handle_kbd_interrupt(site, contest, problem):
"""
Method to handle keyboard interrupt
"""
from shutil import rmtree
print('Cleaning up...')
# Handle case for SPOJ specially as it does not have contests
contest = '' if site == 'spoj' else contest
if problem is not None:
path = os.path.join(Utilities.cache_dir, site, contest, problem)
if os.path.isdir(path):
rmtree(path)
else:
path = os.path.join(Utilities.cache_dir, site, contest)
if os.path.isdir(path):
rmtree(path)
print('Done. Exiting gracefully.')
@staticmethod
def run_solution(args):
"""
Method to run and test the user's solution against sample cases
"""
problem = args['source']
extension = problem.split('.')[-1]
problem = problem.split('.')[0]
basename = problem.split('/')[-1]
problem_path = os.path.join(os.getcwd(), problem)
if not os.path.isfile(problem_path + '.' + extension):
print('ERROR : No such file')
sys.exit(0)
problem_code = args['problem'] if args['problem'] else basename
contest_code = '' if args['site'] == 'spoj' else args['contest']
testcases_path = os.path.join(Utilities.cache_dir, args[
'site'], contest_code, problem_code)
if os.path.isdir(testcases_path):
num_cases = len(os.listdir(testcases_path)) // 2
results, expected_outputs, user_outputs = [], [], []
if extension in ['c', 'cpp', 'java', 'py', 'hs', 'rb']:
# Compiler flags taken from http://codeforces.com/blog/entry/79
compiler = {
'hs': 'ghc --make -O -dynamic -o ' + basename,
'py': None,
'rb': None,
'c': 'gcc -static -DONLINE_JUDGE -fno-asm -lm -s -O2 -o ' + basename,
'cpp': 'g++ -static -DONLINE_JUDGE -lm -s -x c++ -O2 -std=c++14 -o ' + basename,
'java': 'javac -d .'
}[extension]
execute_command = {
'py': 'python \'' + problem_path + '.' + extension + '\'',
'rb': 'ruby \'' + problem_path + '.' + extension + '\'',
'hs': './' + basename,
'c': './' + basename,
'cpp': './' + basename,
'java': 'java -DONLINE_JUDGE=true -Duser.language=en -Duser.region=US -Duser.variant=US ' + basename
}[extension]
if compiler is None:
compile_status = 0
else:
compile_status = os.system(
compiler + ' \'' + problem_path + '.' + extension + '\'')
if compile_status == 0:
# Compiled successfully
timeout_command = 'timeout' if platform.system() == 'Linux' else 'gtimeout'
for i in range(num_cases):
status = os.system(timeout_command + ' 2s ' + execute_command + ' < ' + os.path.join(
testcases_path, 'Input' + str(i)) + ' > temp_output' + str(i))
with open(os.path.join(testcases_path, 'Output' + str(i)), 'r') as out_handler:
expected_output = out_handler.read().strip().split('\n')
expected_output = '\n'.join(
[line.strip() for line in expected_output])
expected_outputs += [expected_output]
if status == 31744:
# Time Limit Exceeded
results += [Utilities.colors['BOLD'] + Utilities.colors[
'YELLOW'] + 'TLE' + Utilities.colors['ENDC']]
user_outputs += ['']
elif status == 0:
# Ran successfully
with open('temp_output' + str(i), 'r') as temp_handler:
user_output = temp_handler.read().strip().split('\n')
user_output = '\n'.join(
[line.strip() for line in user_output])
user_outputs += [user_output]
if expected_output == user_output:
# All Correct
results += [Utilities.colors['BOLD'] + Utilities.colors[
'GREEN'] + 'AC' + Utilities.colors['ENDC']]
else:
# Wrong Answer
results += [Utilities.colors['BOLD'] + Utilities.colors[
'RED'] + 'WA' + Utilities.colors['ENDC']]
else:
# Runtime Error
results += [Utilities.colors['BOLD'] +
Utilities.colors['RED'] + 'RTE' + Utilities.colors['ENDC']]
user_outputs += ['']
else:
# Compilation error occurred
message = Utilities.colors['BOLD'] + Utilities.colors[
'RED'] + 'Compilation error. Not run against test cases' + Utilities.colors['ENDC'] + '.'
print(message)
sys.exit(0)
else:
print('Supports only C, C++, Python, Java, Ruby and Haskell as of now.')
sys.exit(0)
from terminaltables import AsciiTable
table_data = [['Serial No', 'Input',
'Expected Output', 'Your Output', 'Result']]
inputs = Utilities.input_file_to_string(testcases_path, num_cases)
for i in range(num_cases):
row = [
i + 1,
inputs[i],
expected_outputs[i],
user_outputs[i] if any(sub in results[i]
for sub in ['AC', 'WA']) else 'N/A',
results[i]
]
table_data.append(row)
table = AsciiTable(table_data)
print(table.table)
# Clean up temporary files
Utilities.cleanup(num_cases, basename, extension)
else:
print('Test cases not found locally...')
args['problem'] = problem_code
args['force'] = True
args['source'] = problem + '.' + extension
Utilities.download_problem_testcases(args)
print('Running your solution against sample cases...')
Utilities.run_solution(args)
@staticmethod
def get_html(url):
"""
Utility function get the html content of an url
"""
sys.setrecursionlimit(10000)
MAX_TRIES = 3
try:
for try_count in range(MAX_TRIES):
r = rq.get(url)
if r.status_code == 200:
break
if try_count >= MAX_TRIES:
print('Could not fetch content. Please try again.')
sys.exit(0)
except Exception as e:
print('Please check your internet connection and try again.')
sys.exit(0)
return r
class Platform:
"""
Base class for platforms
"""
def __init__(self, args):
self.site = args['site']
self.contest = args['contest']
self.force_download = args['force']
self.responses = []
self.lock = threading.Lock()
def get_problem_name(self, response):
return response.url.split('/')[-1]
def build_problem_url(self):
raise NotImplementedError
def parse_html(self):
raise NotImplementedError
def scrape_problem(self):
"""
Method to scrape a single problem
"""
contest = '' if self.site == 'spoj' else self.contest
print('Fetching problem %s-%s from %s...' % (contest, self.problem, self.site))
req = Utilities.get_html(self.build_problem_url())
inputs, outputs = self.parse_html(req)
Utilities.store_files(self.site, self.contest,
self.problem, inputs, outputs)
print('Done.')
def fetch_html(self, link):
r = rq.get(link)
with self.lock:
self.responses += [r]
def handle_batch_requests(self, links):
"""
Method to send simultaneous requests to all problem pages
"""
threads = [threading.Thread(target=self.fetch_html, args=(link,)) for link in links]
for t in threads:
t.start()
for t in threads:
t.join()
failed_requests = []
for response in self.responses:
if response is not None and response.status_code == 200:
inputs, outputs = self.parse_html(response)
self.problem = self.get_problem_name(response)
Utilities.check_cache(self.site, self.contest, self.problem)
Utilities.store_files(
self.site, self.contest, self.problem, inputs, outputs)
else:
failed_requests += [response.url]
return failed_requests
def scrape_contest(self):
"""
Method to scrape all problems from a given contest
"""
print('Checking problems available for contest %s-%s...' % (self.site, self.contest))
req = Utilities.get_html(self.build_contest_url())
links = self.get_problem_links(req)
print('Found %d problems..' % (len(links)))
if not self.force_download:
cached_problems = os.listdir(os.path.join(
Utilities.cache_dir, self.site, self.contest))
links = [link for link in links if link.split(
'/')[-1] not in cached_problems]
failed_requests = self.handle_batch_requests(links)
if len(failed_requests) > 0:
self.handle_batch_requests(failed_requests)
class Codeforces(Platform):
"""
Class to handle downloading of test cases from Codeforces
"""
def __init__(self, args):
self.problem = args['problem']
super(Codeforces, self).__init__(args)
def parse_html(self, req):
"""
Method to parse the html and get test cases
from a codeforces problem
"""
soup = bs(req.text, 'html.parser')
inputs = soup.findAll('div', {'class': 'input'})
outputs = soup.findAll('div', {'class': 'output'})
if len(inputs) == 0 or len(outputs) == 0:
print('Problem not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
repls = ('<br>', '\n'), ('<br/>', '\n'), ('</br>', '')
formatted_inputs, formatted_outputs = [], []
for inp in inputs:
pre = inp.find('pre').decode_contents()
pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
pre = re.sub('<[^<]+?>', '', pre)
formatted_inputs += [pre]
for out in outputs:
pre = out.find('pre').decode_contents()
pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
pre = re.sub('<[^<]+?>', '', pre)
formatted_outputs += [pre]
# print 'Inputs', formatted_inputs
# print 'Outputs', formatted_outputs
return formatted_inputs, formatted_outputs
def get_problem_links(self, req):
"""
Method to get the links for the problems
in a given codeforces contest
"""
soup = bs(req.text, 'html.parser')
table = soup.find('table', {'class': 'problems'})
if table is None:
print('Contest not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
links = ['http://codeforces.com' +
td.find('a')['href'] for td in table.findAll('td', {'class': 'id'})]
return links
def build_problem_url(self):
contest_type = 'contest' if int(self.contest) <= 100000 else 'gym'
return 'http://codeforces.com/%s/%s/problem/%s' % (contest_type, self.contest, self.problem)
def build_contest_url(self):
contest_type = 'contest' if int(self.contest) <= 100000 else 'gym'
return 'http://codeforces.com/%s/%s' % (contest_type, self.contest)
class Codechef(Platform):
"""
Class to handle downloading of test cases from Codechef
"""
def __init__(self, args):
self.problem = args['problem']
super(Codechef, self).__init__(args)
def _extract(self, data, marker):
data_low = data.lower()
extracts = []
idx = data_low.find(marker, 0)
while not idx == -1:
start = data_low.find('```', idx)
end = data_low.find('```', start + 3)
extracts += [data[start + 3:end]]
idx = data_low.find(marker, end)
return [extract.strip() for extract in extracts]
def parse_html(self, req):
"""
Method to parse the html and get test cases
from a codechef problem
"""
try:
data = str(json.loads(req.text)['body'])
except (KeyError, ValueError):
print('Problem not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
inputs = self._extract(data, 'example input')
outputs = self._extract(data, 'example output')
return inputs, outputs
def get_problem_links(self, req):
"""
Method to get the links for the problems
in a given codechef contest
"""
soup = bs(req.text, 'html.parser')
table = soup.find('table', {'class': 'dataTable'})
if table is None:
print('Contest not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
links = [div.find('a')['href']
for div in table.findAll('div', {'class': 'problemname'})]
links = ['https://codechef.com/api/contests/' + self.contest +
'/problems/' + link.split('/')[-1] for link in links]
return links
def build_problem_url(self):
return 'https://codechef.com/api/contests/%s/problems/%s' % (self.contest, self.problem)
def build_contest_url(self):
return 'https://codechef.com/%s' % self.contest
class Spoj(Platform):
"""
Class to handle downloading of test cases from Spoj
"""
def __init__(self, args):
self.problem = args['problem'].upper()
super(Spoj, self).__init__(args)
def parse_html(self, req):
"""
Method to parse the html and get test cases
from a spoj problem
"""
soup = bs(req.text, 'html.parser')
test_cases = soup.findAll('pre')
if test_cases is None or len(test_cases) == 0:
print('Problem not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
formatted_inputs, formatted_outputs = [], []
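        # Each <pre> block on Spoj holds both sample input and output; the regex
        # lists below cut away everything except the relevant half of the block.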
input_list = [
'<pre>(.|\n|\r)*<b>Input:?</b>:?',
'<b>Output:?</b>(.|\n|\r)*'
]
output_list = [
'<pre>(.|\n|\r)*<b>Output:?</b>:?',
'</pre>'
]
input_regex = re.compile('(%s)' % '|'.join(input_list))
output_regex = re.compile('(%s)' % '|'.join(output_list))
for case in test_cases:
inp = input_regex.sub('', str(case))
out = output_regex.sub('', str(case))
inp = re.sub('<[^<]+?>', '', inp)
out = re.sub('<[^<]+?>', '', out)
formatted_inputs += [inp.strip()]
formatted_outputs += [out.strip()]
# print 'Inputs', formatted_inputs
# print 'Outputs', formatted_outputs
return formatted_inputs, formatted_outputs
def build_problem_url(self):
return 'http://spoj.com/problems/%s' % self.problem
class Hackerrank(Platform):
"""
Class to handle downloading of test cases from Hackerrank
"""
def __init__(self, args):
self.problem = '-'.join(args['problem'].split()
).lower() if args['problem'] is not None else None
super(Hackerrank, self).__init__(args)
def parse_html(self, req):
"""
Method to parse the html and get test cases
from a hackerrank problem
"""
try:
data = json.loads(req.text)
soup = bs(data['model']['body_html'], 'html.parser')
except (KeyError, ValueError):
print('Problem not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
input_divs = soup.findAll('div', {'class': 'challenge_sample_input'})
output_divs = soup.findAll('div', {'class': 'challenge_sample_output'})
inputs = [input_div.find('pre') for input_div in input_divs]
outputs = [output_div.find('pre') for output_div in output_divs]
regex_list = [
'<pre>(<code>)?',
'(</code>)?</pre>'
]
regex = re.compile('(%s)' % '|'.join(regex_list))
formatted_inputs, formatted_outputs = [], []
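        # Some problems wrap each sample line in <span> tags; otherwise the raw
        # <pre> contents are used after stripping the surrounding tags with the regex.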
for inp in inputs:
spans = inp.findAll('span')
if len(spans) > 0:
formatted_input = '\n'.join(
[span.decode_contents() for span in spans])
else:
formatted_input = regex.sub('', str(inp))
formatted_inputs += [formatted_input.strip()]
for out in outputs:
spans = out.findAll('span')
if len(spans) > 0:
formatted_output = '\n'.join(
[span.decode_contents() for span in spans])
else:
formatted_output = regex.sub('', str(out))
formatted_outputs += [formatted_output.strip()]
# print 'Inputs', formatted_inputs
# print 'Outputs', formatted_outputs
return formatted_inputs, formatted_outputs
def get_problem_links(self, req):
"""
Method to get the links for the problems
in a given hackerrank contest
"""
try:
data = json.loads(req.text)
data = data['models']
except (KeyError, ValueError):
print('Contest not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
links = ['https://www.hackerrank.com/rest/contests/' + self.contest +
'/challenges/' + problem['slug'] for problem in data]
return links
def build_problem_url(self):
return 'https://www.hackerrank.com/rest/contests/%s/challenges/%s' % (self.contest, self.problem)
def build_contest_url(self):
        return 'https://www.hackerrank.com/rest/contests/%s/challenges' % self.contest
class AtCoder(Platform):
"""
Class to handle downloading of test cases from atcoder
"""
def __init__(self, args):
self.problem = args['problem']
super(AtCoder, self).__init__(args)
def parse_html(self, req):
"""
Method to parse the html and get test cases
from a atcoder problem
"""
soup = bs(req.text, 'html.parser')
        inouts = soup.findAll('div', {'class': 'part'})
repls = ('<br>', '\n'), ('<br/>', '\n'), ('</br>', '')
formatted_inputs, formatted_outputs = [], []
inouts = filter((lambda x: x.find('section') and x.find('section').find('h3')), inouts)
for inp in inouts:
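            # "入力例" is Japanese for "Sample Input"; "出力例" for "Sample Output".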
if inp.find('section').find('h3').text[:3] == "入力例":
pre = inp.find('pre').decode_contents()
pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
pre = re.sub('<[^<]+?>', '', pre)
pre = pre.replace("&", "&")
pre = pre.replace("<", "<")
pre = pre.replace(">", ">")
formatted_inputs += [pre]
if inp.find('section').find('h3').text[:3] == "出力例":
pre = inp.find('pre').decode_contents()
pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
pre = re.sub('<[^<]+?>', '', pre)
pre = pre.replace("&", "&")
pre = pre.replace("<", "<")
pre = pre.replace(">", ">")
formatted_outputs += [pre]
return formatted_inputs, formatted_outputs
def get_problem_links(self, req):
"""
Method to get the links for the problems
in a given atcoder contest
"""
soup = bs(req.text, 'html.parser')
table = soup.find('tbody')
if table is None:
print('Contest not found..')
Utilities.handle_kbd_interrupt(
self.site, self.contest, self.problem)
sys.exit(0)
links = ['http://beta.atcoder.jp' +
td.find('a')['href'] for td in soup.findAll('td', {'class': 'text-center no-break'})]
return links
def get_problem_name(self, response):
"""
        Method to get the problem identifier for an atcoder problem
        from the first character of the page title
"""
soup = bs(response.text, 'html.parser')
return soup.find('title').get_text()[0].lower()
def build_problem_url(self):
return 'https://beta.atcoder.jp/contests/%s/tasks/%s' % (self.contest, self.problem)
def build_contest_url(self):
return 'https://beta.atcoder.jp/contests/%s/tasks/' % self.contest
| ACedIt | /ACedIt-1.2.1.tar.gz/ACedIt-1.2.1/acedit/util.py | util.py |
import json
import os
from setuptools.command.install import install
class InstallEntry(install):
def run(self):
default_site = 'codeforces'
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'ACedIt')
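        # Imported here rather than at module level, since the acedit package may
        # not yet be importable when setup.py is first loaded.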
from acedit.main import supported_sites
for site in supported_sites:
# create cache directory structure
if not os.path.isdir(os.path.join(cache_dir, site)):
os.makedirs(os.path.join(cache_dir, site))
data = {'default_site': default_site.strip(
), 'default_contest': None, 'cachedir': cache_dir}
with open(os.path.join(cache_dir, 'constants.json'), 'w') as f:
f.write(json.dumps(data, indent=2))
install.run(self)
| ACedIt | /ACedIt-1.2.1.tar.gz/ACedIt-1.2.1/acedit/install_entry.py | install_entry.py |
import sys
import acedit.util as util
supported_sites = ['codeforces', 'codechef', 'hackerrank', 'spoj', 'atcoder']
def validate_args(args):
"""
Method to check valid combination of flags
"""
if args['default_site'] is not None or args['default_contest'] is not None:
return
if args['clear_cache']:
return
    if args['site'] != 'spoj' and args['contest'] is None:
print('Please specify contest code or set a default contest.')
sys.exit(0)
if args['source']:
return
if args['site'] == 'spoj' and args['problem'] is None:
print('Please specify a problem code for Spoj.')
sys.exit(0)
def main():
args = util.Utilities.parse_flags(supported_sites)
validate_args(args)
try:
if args['default_site']:
# set default site
util.Utilities.set_constants('default_site', args['default_site'])
elif args['default_contest']:
# set default contest
util.Utilities.set_constants('default_contest', args['default_contest'])
elif args['clear_cache']:
# clear cached test cases
util.Utilities.clear_cache(args['site'])
elif args['source']:
# run code
util.Utilities.run_solution(args)
elif args['problem'] is not None:
# fetch single problem
util.Utilities.download_problem_testcases(args)
elif args['contest']:
# fetch all problems for the contest
util.Utilities.download_contest_testcases(args)
else:
print('Invalid combination of flags.')
except KeyboardInterrupt:
# Clean up files here
util.Utilities.handle_kbd_interrupt(
args['site'], args['contest'], args['problem'])
if __name__ == '__main__':
main()
| ACedIt | /ACedIt-1.2.1.tar.gz/ACedIt-1.2.1/acedit/main.py | main.py |
######
Readme
######
Description
===========
PyAChemKit is a collection of Artificial Chemistry software written in Python - a library and collection of tools.
Artificial Chemistry (AChem) is a spin-off topic of Artificial Life. AChem is aimed at the emergence of life from a
non-living environment - primordial soup, etc.
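
For example, a reaction network can be read from a ``.chem`` file and inspected
(a minimal sketch, assuming the ``AChemKit.reactionnet.ReactionNetwork`` API;
``mynetwork.chem`` is a placeholder filename)::

    from AChemKit.reactionnet import ReactionNetwork

    net = ReactionNetwork.from_filename("mynetwork.chem")

    print net.seen       # molecular species seen in the network
    print net.reactions  # the reactions between them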
Installation
============
To install on Ubuntu Linux, run ::
sudo easy_install -U AChemKit
This package should work on other Linux distributions and versions of Windows, but is untested.
This package requires the following:
* Python >= 2.6 http://www.python.org/
Some features use the following:
* NetworkX
* GraphViz http://www.graphviz.org/
Optionally, the following can be installed to improve performance:
* Psyco http://psyco.sourceforge.net
* PyPy http://codespeak.net/pypy
Source
======
The latest version of the source code is available from https://github.com/afaulconbridge/PyAChemKit
The source code additionally requires the following:
* Sphinx >= 1.0 http://sphinx.pocoo.org/
* Graphviz http://www.graphviz.org/
* Make http://www.gnu.org/software/make/
* LaTeX http://www.latex-project.org/
* PyLint >=0.13.0 http://www.logilab.org/project/pylint/
* Coverage http://nedbatchelder.com/code/coverage/
For a Debian-based Linux distribution --- e.g. Debian, Ubuntu --- these can be installed / updated with::
make setup
(Note, LaTeX is not installed via this method because it is very large. Run ``sudo apt-get install texlive-full`` if you want to be able to compile the PDF documentation.)
There is a makefile that will run some useful tasks for you (generate documentation, test, benchmark). This can be accessed by running the following command::
make help
Copyright
=========
This project is licensed under a modified-BSD license. See the file ``COPYRIGHT`` for details.
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/README.txt | README.txt |
from setuptools import setup, find_packages
setup(name="AChemKit",
version="0.3.0",
author="Adam Faulconbridge",
author_email="afaulconbridge@googlemail.com",
# packages=["AChemKit", "AChemKit/utils", "AChemKit/tools"],
packages = find_packages(),
classifiers = ["Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering :: Artificial Life",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"],
url="https://github.com/afaulconbridge/PyAChemKit",
download_url="http://pypi.python.org/packages/source/A/AChemKit/AChemKit-0.1.0.tar.gz#md5=a4b09ccb7ce61642305e3ef5874ac924",
description="An Artificial Chemistry Tookit",
long_description=open("README.txt").read() )
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/setup.py | setup.py |
Search.setIndex({objects:{"AChemKit.utils.bag_test.TestOrderedFrozenBag":{test_iter:[2,1,1],setUp:[2,1,1],cls:[2,4,1]},"AChemKit.reactionnet":{ReactionNetwork:[4,3,1]},"AChemKit.tools":{chem_to_dot:[1,0,1],chem_to_pdf:[1,0,1],log_to_chem:[1,0,1],chem_pp:[1,0,1],chem_uniform:[1,0,1],chem_linear:[1,0,1]},"AChemKit.utils":{utils_test:[2,0,1],bag:[2,0,1],simpledot:[2,0,1],bag_test:[2,0,1],utils:[2,0,1]},"AChemKit.utils.bag_test.TestFrozenBag":{test_len:[2,1,1],test_hash:[2,1,1],setUp:[2,1,1],test_pickle:[2,1,1],test_repr:[2,1,1],test_iter:[2,1,1],test_eq:[2,1,1],test_str:[2,1,1],cls:[2,4,1]},"AChemKit.properties_wnx":{make_catalysis_graph:[4,2,1],make_linkage_graph:[4,2,1]},"AChemKit.utils.bag_test.TestBag":{test_add:[2,1,1],setUp:[2,1,1],test_discard:[2,1,1],test_hash:[2,1,1],cls:[2,4,1]},"AChemKit.sims_simple.AChemAbstract":{react:[4,1,1]},"AChemKit.bucket.Bucket":{from_file:[4,5,1],reactionnet:[4,4,1],from_string:[4,5,1],from_filename:[4,5,1]},"AChemKit.utils.bag":{FrozenBag:[2,3,1],OrderedFrozenBagCache:[2,3,1],OrderedBag:[2,3,1],Bag:[2,3,1],OrderedFrozenBag:[2,3,1]},"AChemKit.sims_simple_test.TestStepwise":{test_basic:[4,1,1]},"AChemKit.sims_simple_test":{TestItterative:[4,3,1],TestStepwise:[4,3,1]},"AChemKit.randomnet_test.TestUniform":{test_nprods_tuple:[4,1,1],test_rate_int:[4,1,1],test_nmols_tuple:[4,1,1],test_nmols_dict:[4,1,1],test_nreacts_int:[4,1,1],test_nprods_list:[4,1,1],test_nreacts_dict:[4,1,1],test_nprods_none_tuple:[4,1,1],test_nprods_none_dict:[4,1,1],test_rate_dict:[4,1,1],test_nprods_dict:[4,1,1],test_rate_float:[4,1,1],test_nprods_int:[4,1,1],test_nreacts_tuple:[4,1,1],test_rate_tuple:[4,1,1],test_rate_list:[4,1,1],test_nreacts_list:[4,1,1],test_nmols_int:[4,1,1],setUp:[4,1,1],test_nmols_list:[4,1,1]},"AChemKit.sims_gillespie_test":{TestGillespie:[4,3,1]},"AChemKit.reactionnet_test":{TestReactionNetwork:[4,3,1],TestReactionNetwork_from_string:[4,3,1]},AChemKit:{reactionnetdot_test:[4,0,1],randomnet:[4,0,1],sims_simple_test:[4,0,1],reactionnetdot:[4,0,1],utils:[2,0,1],randomnet_test:[4,0,1],bucket:[4,0,1],reactionnet_test:[4,0,1],reactionnet:[4,0,1],sims_simple:[4,0,1],properties_motif:[4,0,1],sims_gillespie:[4,0,1],properties_wnx:[4,0,1],sims_gillespie_test:[4,0,1],tools:[1,0,1],properties:[4,0,1],"__init__":[4,0,1]},"AChemKit.reactionnet_test.TestReactionNetwork":{setUp:[4,1,1],test_reactions:[4,1,1],test_reaction_to_string:[4,1,1],test_to_string:[4,1,1],test_seen:[4,1,1],test_equal:[4,1,1],test_hash:[4,1,1],test_rate:[4,1,1]},"AChemKit.sims_gillespie.Gillespie":{next_reaction:[4,1,1]},"AChemKit.reactionnet.ReactionNetwork":{from_file:[4,5,1],reactions:[4,4,1],from_filename:[4,5,1],rate:[4,1,1],rates:[4,1,1],reaction_to_string:[4,5,1],seen:[4,4,1],from_string:[4,5,1],from_reactions:[4,5,1]},"AChemKit.utils.bag.OrderedBag":{discard:[2,1,1],add:[2,1,1]},"AChemKit.properties":{get_reversible:[4,2,1],has_autocatalysis_direct:[4,2,1],has_synthesis:[4,2,1],has_varying_rates:[4,2,1],get_decomposition:[4,2,1],has_catalysis_direct:[4,2,1],get_divergence:[4,2,1],get_catalysis_direct:[4,2,1],has_divergence:[4,2,1],has_reversible:[4,2,1],get_autocatalysis_direct:[4,2,1],not_conservation_mass:[4,2,1],has_decomposition:[4,2,1],get_synthesis:[4,2,1]},"AChemKit.sims_simple":{simulate_stepwise_iter:[4,2,1],simulate_stepwise_multiprocessing_iter:[4,2,1],simulate_itterative:[4,2,1],simulate_itterative_iter:[4,2,1],simulate_stepwise_multiprocessing:[4,2,1],AChemReactionNetwork:[4,3,1],AChemAbstract:[4,3,1],simulate_stepwise:[4,2,1]},"AChemKit.sims_gillespie_test.TestGillespie":{test_basic:
[4,1,1],setUp:[4,1,1]},"AChemKit.randomnet":{combinations_with_replacement:[4,2,1],Linear:[4,2,1],Uniform:[4,2,1]},"AChemKit.bucket.Event":{reactants:[4,4,1],products:[4,4,1],time:[4,4,1]},"AChemKit.randomnet_test":{TestLinear:[4,3,1],TestUniform:[4,3,1]},"AChemKit.utils.bag.FrozenBag":{count:[2,1,1]},"AChemKit.utils.simpledot":{SimpleDot:[2,3,1]},"AChemKit.reactionnet_test.TestReactionNetwork_from_string":{setUp:[4,1,1]},"AChemKit.utils.bag_test":{TestBag:[2,3,1],TestOrderedFrozenBag:[2,3,1],TestFrozenBag:[2,3,1],TestOrderedBag:[2,3,1]},"AChemKit.properties_motif":{find_loops:[4,2,1]},"AChemKit.utils.utils_test.TestGetSample":{test_ints:[2,1,1],test_dicts:[2,1,1],test_lists:[2,1,1]},"AChemKit.utils.utils":{unpck:[2,2,1],long_subseq:[2,2,1],get_sample:[2,2,1],memory_free:[2,2,1],myp:[2,2,1]},"AChemKit.sims_simple.AChemReactionNetwork":{react:[4,1,1]},"AChemKit.reactionnetdot_test.TestReactionNetworkDot":{setUp:[4,1,1],test_to_dot_str:[4,1,1]},"AChemKit.utils.utils_test":{TestGetSample:[2,3,1]},"AChemKit.utils.bag.OrderedFrozenBag":{count:[2,1,1],index:[2,1,1]},"AChemKit.reactionnetdot.ReactionNetworkDot":{to_dot:[4,1,1],dot:[4,4,1]},"AChemKit.utils.bag_test.TestOrderedBag":{setUp:[2,1,1],test_hash:[2,1,1],cls:[2,4,1]},"AChemKit.reactionnetdot":{ReactionNetworkDot:[4,3,1]},"AChemKit.utils.simpledot.SimpleDot":{keys:[2,1,1],plot:[2,1,1],add:[2,1,1],get:[2,1,1]},"AChemKit.utils.bag.Bag":{discard:[2,1,1],add:[2,1,1]},"AChemKit.bucket":{Bucket:[4,3,1],Event:[4,3,1]},"AChemKit.randomnet_test.TestLinear":{test_maxlengt_tuple:[4,1,1],test_natoms_dict:[4,1,1],test_natoms_int:[4,1,1],setUp:[4,1,1],test_natoms_tuple:[4,1,1],test_maxlengt_dict:[4,1,1],test_pform_tuple:[4,1,1],test_undirected:[4,1,1],test_directed:[4,1,1],test_pbreak_tuple:[4,1,1]},"AChemKit.reactionnetdot_test":{TestReactionNetworkDot:[4,3,1]},"AChemKit.sims_gillespie":{Gillespie:[4,3,1],simulate_gillespie_iter:[4,2,1],simulate_gillespie:[4,2,1]},"AChemKit.sims_simple_test.TestItterative":{test_basic:[4,1,1],setUp:[4,1,1]}},terms:{get_revers:4,represent:[7,4],all:[2,4,7],code:[5,4],partial:4,edg:2,consum:4,orderedbag:2,has_synthesi:4,has_varying_r:4,overlap:6,follow:[5,2,4,7],teststepwisemultiprocess:[],orderedfrozenbag:2,extra:2,catalyt:4,depend:4,graph:[2,4],readabl:4,descript:[5,0],test_natoms_dict:4,program:[1,2],certain:4,under:5,exit:1,testreactionnetwork:4,test_pickl:2,sourc:[5,0,4],string:[2,4,7],fals:[2,4],util:[0,1,3,2,4],long_subseq:2,novel:4,veri:[5,2],implicitli:2,subprocess:2,leftov:4,list:[2,4],fewer:4,achemreactionnetwork:4,"try":[],item:2,adjust:2,small:2,pleas:[],upper:7,smaller:[],test_nreacts_int:4,speci:[1,7,4],isinst:2,direct:[1,4],annot:[],utils_test:[0,3,2,4],rate:[1,7,4],design:[2,4],bucket_file_format:4,test_list:2,pass:[2,4],simulate_stepwise_multiprocessing_it:4,chem:[0,1,7,4],odd:2,compat:2,index:[2,4],abc:[2,4],testfrozenbag:2,from_react:4,defin:4,resembl:4,abl:[5,4],uniform:[1,4],access:[5,2,4],test_nprods_int:4,version:5,invers:[7,4],"new":2,net:5,method:[5,2,4],unlabel:4,full:[5,1,4],themselv:4,get_diverg:4,gener:[5,1,7,4],even:2,here:[7,4],behaviour:[],nedbatcheld:5,ubuntu:5,path:4,standard:2,modifi:5,abstractli:7,valu:[2,4],convert:[2,4,7],produc:[1,2,4],test_pform_tupl:4,copyright:[5,0],larger:[],credit:4,amount:2,min_shar:4,loop:4,permit:2,spin:5,implement:4,kauffman:4,test_equ:4,via:[5,2,4],term:7,nreaction:4,instr:4,win:4,modul:[0,1,3,2,4],testorderedbag:2,apt:5,filenam:[2,4],unix:[],api:[1,2,4],duck:4,instal:[5,0,4],catalsyst:4,plot:2,from:[5,1,2,4,7],describ:7,would:6,prove:4,doubl:4,two:4,cover
ag:5,next:4,symbol:7,live:5,call:[2,4],recommend:7,dict:[2,4],type:[1,2,4],until:4,more:[7,4],sort:[6,2,4,7],theunivers:7,diamond:4,desir:4,randomnet:[0,3,4],pylint:5,visual:4,accept:[2,4],examin:1,particular:[1,7,4],known:2,cach:4,achemabstract:4,must:[2,4],none:[2,4],join:4,alia:2,setup:[5,2,4],work:[5,1,2,4],uniqu:2,histori:4,soup:5,kwarg:4,can:[5,1,2,4,7],teststepwis:4,test_repr:2,undirect:1,give:4,process:[1,4],sudo:5,share:4,not_conservation_mass:4,indic:4,topic:5,minimum:4,want:5,explos:4,phrase:[],test_nmols_list:4,occur:4,alwai:2,multipl:[2,4,7],thing:[6,2,4,7],rather:4,write:1,how:4,instead:4,simpl:4,either:[7,4],map:2,product:[7,4],recogn:2,test_maxlengt_dict:4,max:4,stuart:4,lump:2,befor:7,wrong:4,lot:4,mai:[2,4,7],end:[2,4],grop:4,data:[2,4,7],parallel:[2,4],vera:4,"short":[],attempt:[2,4],classmethod:4,test_nprods_none_dict:4,stdin:1,explicit:[7,4],correspond:7,assign:4,caus:[2,4],inform:1,combin:[7,4],allow:[],untest:5,egg:7,order:[2,4,7],mutableset:2,origin:[2,4],help:[5,1],over:4,becaus:[5,4],graphviz:[5,1,2],paper:4,through:4,test_hash:[2,4],still:[2,4],mainli:4,bag_test:[0,3,2,4],paramet:4,reactant:[7,4],style:1,afaulconbridg:5,reactionnetworkk:[],simulate_itt:4,distrbut:5,simpledot:[0,1,3,2,4],window:5,fie:5,hidden:4,therefor:2,might:4,non:[5,4],good:6,"return":[2,4],thei:[1,2,4],python:[5,2,4],auto:[],spell:[],autodocu:[],easy_instal:5,"break":[1,2,4],mention:[],test_seen:4,now:7,from_str:4,multiprocess:4,from_filenam:4,name:[1,2,4,7],anyth:2,revers:[7,4],separ:[7,4],arrow:4,each:[7,4],found:[],reactionnetot:[],achem:[5,4],testgetsampl:2,protein:4,pygment:[],compil:5,weight:[2,4],replac:4,individu:4,testreactionnetwork_from_str:4,equivlanet:4,adam:4,runtest:[2,4],wrap:[2,4],linkag:4,"static":[],expect:4,ommit:1,todo:4,orient:4,shown:4,unbound:4,network:[1,4],space:7,goe:[],open:4,test_nprods_none_tupl:4,test_nprods_dict:4,highli:[],texliv:5,memory_fre:2,suitabl:1,rel:2,reactionnetwork_to_multidigraph:[],gov:4,correct:[],linear:[1,4],maxmol:4,situat:[7,4],given:[2,4],free:[2,4],infil:[1,4],base:[5,2,4],theori:6,dictionari:[2,4],latest:5,put:[2,4],org:5,reactionnetdot_test:[0,3,4],driven:1,indent:[],logilab:5,frequenc:2,could:[6,4],omit:7,keep:2,filter:4,turn:2,length:[1,2,4],enforc:[2,4],test_pbreak_tupl:4,principl:4,achemkit:[0,1,2,3,4,5,7],attribtu:4,first:[],oper:4,softwar:5,rang:4,core:4,directli:4,test_nreacts_dict:4,onc:4,number:[1,6,2,4,7],restrict:7,date:4,unlik:4,alreadi:4,done:[6,7,4],wrapper:4,blank:[1,7,4],miss:[],fanci:[],test_rate_tupl:4,differ:4,start:[7,4],sims_simple_test:[0,3,4],unknown:[],licens:5,system:7,messag:1,checker:1,master:[],circl:4,properties_wnx:[0,3,4],white:7,molecular:[1,7,4],includ:[],option:[5,1,6],cope:2,tool:[5,0,1,3,4],copi:4,test_rate_int:4,specifi:[2,4,7],github:5,pars:[],conserv:4,rst:[],off:5,testitt:4,than:[7,4],chem_linear:[0,1,3,4],liter:[],properties_motif:[0,3,4],provid:[1,2,4],remov:7,tree:4,second:4,charact:7,project:[5,0,3,6],reus:[],str:2,were:[2,4],arrang:4,grei:4,seri:4,randomli:4,mini:4,comput:4,argument:[2,4],packag:[0,1,2,3,4,5],seed:1,have:[2,4,7],betaalpha:4,need:[2,4],seen:4,breakfast:7,"null":4,engin:2,subgraph:2,equival:4,min:4,destroi:7,self:2,violat:4,note:[5,2,4],also:[2,4,7],without:4,take:4,which:[2,4],environ:5,singl:[2,4,7],test_add:2,pipelin:1,printer:1,unless:4,distribut:[5,2,4],track:4,object:[2,4],mysimpledot:2,react:4,visa:4,automodul:[],pair:4,alpha:7,test_to_dot_str:4,"class":[2,4],sub:4,latex:5,don:[],compar:4,doc:[],later:[],flow:[],doe:[2,4],test_to_str:4,place:7,determin:4,reactionnetdot:[0,3
,4],unchang:4,dot:[1,2,4],currentmodul:[],set:[6,2,4],make_catalysis_graph:4,show:[1,4],text:7,pform:[1,4],syntax:1,particularli:2,networkx:[5,4],pysco:6,find:[],involv:4,current:6,onli:4,explicitli:[2,4],layout:[1,2],pretti:1,has_autocatalysis_direct:4,test_react:4,less:4,activ:4,state:7,makefil:[5,1],should:[5,2,4,7],suppos:4,rich:4,combo:2,neato:2,analys:4,itter:4,count:2,unus:[],variou:[1,2,4,7],get:[5,2,4],get_autocatalysis_direct:4,pypi:[5,6],autom:1,cannot:[2,4,7],longest:2,shedskin:6,"import":[],report:[],reconstruct:4,nelli:4,test_nreacts_list:4,requir:[5,4],autocatalysi:4,nmol:4,organ:6,pydot:2,get_catalysis_direct:4,intrins:4,bag:[0,3,2,4],common:2,contain:[2,4,7],where:[2,4],simulate_gillespie_it:4,view:4,get_decomposit:4,pbreak:[1,4],"float":[2,7],bucket:[0,3,4],methodnam:[2,4],see:[5,1,4],temporarili:7,result:2,arg:[2,4],testcas:[2,4],reaction_to_str:4,test_maxlengt_tupl:4,improv:[5,6],"__lt__":4,best:4,infinit:4,appear:[7,4],said:4,correctli:[2,4],intervalsc:4,someth:4,testreactionnetworkdot:4,written:[5,4],smallest:4,between:[7,4],drawn:4,approach:4,next_react:4,multidigraph:[],attribut:[2,4],altern:4,signatur:[],sortabl:2,kei:[2,4],numer:7,sims_gillespi:[0,3,4],test_pfrom_tupl:[],cycl:[],entir:4,chem_pp:[0,1,3,4],consumpt:4,come:1,addit:[7,4],both:[2,4,7],test_bas:4,test_nprods_list:4,hashabl:[2,4],howev:[2,4],alon:1,etc:[5,2,4],test_rate_dict:4,tutori:[],maxlength:4,futher:6,pdf:[5,1,2],provis:2,com:5,alphbeta:4,testuniform:4,comment:7,raf:6,technic:4,point:[7,4],testbag:2,unittest:[2,4],outfil:1,from_fil:4,testlinear:4,littl:2,ioerror:[],respect:2,assum:4,duplic:[2,4],test_natoms_tupl:4,creat:[2,4,7],test_eq:2,addition:5,primordi:5,log_to_chem:[0,1,3,4],empti:4,compon:[7,4],netbook:[],treat:4,basic:4,valueerror:[2,4],nonexist:[],nodenam:2,test_len:2,probe:4,reaction:[1,7,4],imag:2,simulate_itterative_it:4,ani:[6,2,4,7],togeth:[2,4],demand:4,"__eq__":4,convers:4,"case":[7,4],determinist:4,test_nprods_tupl:4,main:4,look:[],gnu:5,plain:2,properti:[0,3,4],sourceforg:5,aim:5,cast:[2,4],calcul:4,unifi:2,outcom:4,error:4,"__hash__":4,codespeak:5,document:[5,0,7,4],"__ge__":4,stdout:1,metric:4,readm:[5,0],them:[2,4],cluster:2,itself:4,hash:4,test_rate_list:4,test_discard:2,"__init__":[],autocatalyt:4,test_it:2,welcom:0,simulate_stepwis:4,perform:[5,2],make:[5,2,4],shorten:4,simuatlion:4,same:[2,4,7],member:[],test_undirect:4,wierd:2,instanc:[2,4],split:[],largest:4,test_direct:4,conflict:[],complet:1,http:[5,4],optim:6,molecul:[1,4],nreactant:4,chem_to_pdf:[0,1,3,4],kit:[],orderedfrozenbagcach:2,reactionnetworkdot:4,rais:[2,4],initi:4,mani:4,test_nmols_int:4,typic:4,subpackag:[0,3,4],lower:7,task:5,kept:[],els:2,random:[1,2,4],chemistri:[5,7,4],well:[6,7,4],inherit:4,unpck:2,exampl:[1,2,4,7],command:[5,1],thi:[1,2,4,5,6,7],diverg:4,programm:2,fail:4,usual:4,interpret:[],construct:[1,4],identifi:4,execut:[2,4],pyachemkit:5,userdict:2,mol:4,test_nreacts_tupl:4,shape:4,psyco:5,human:4,gillespi:4,rest:[],speed:6,yet:[],simulate_stepwise_it:4,chemsistri:[],shoud:[],samplea:[],easi:2,mix:4,restructuredtext:[],sizeprop:[],theoret:4,reactionnet:[0,3,4],add:[2,4,7],valid:[7,4],blob:[],versa:4,appli:4,bond:1,input:[1,7],har:[2,4],subsequ:[2,4],match:2,nproduct:4,around:4,hypothesi:4,format:[0,1,2,4,7],read:[1,4],when:[2,4,7],rng:[2,4],simulate_stepwise_multiprocess:4,avali:6,know:[],"__gt__":4,part:4,chemic:6,apart:4,linux:5,autodoc:[],like:[2,4],specif:4,chem_to_dot:[0,1,3,4],has_revers:4,test_:[],collect:[5,2,4],"boolean":4,make_linkage_graph:4,output:[1,2,7],manag:2,"function":[2,4],www
:5,interact:4,get_synthesi:4,infilenam:4,sure:4,sampl:[6,2,4],integ:7,find_loop:4,guarante:2,librari:[5,2,4],slice:2,lead:4,simulate_gillespi:4,test_str:2,avoid:7,size:6,definit:4,subclass:4,flatten:2,sims_gillespie_test:[0,3,4],larg:[5,4],sequenc:2,prog:2,three:2,complic:4,prob:6,reactionnetwork:4,refer:[],machin:4,"__ne__":4,run:5,journal:4,usag:1,test_rate_float:4,biologi:4,obei:4,prerequisit:4,immut:4,sims_simpl:[0,3,4],"__all__":[],comparison:[2,4],exhibit:4,test_nmols_dict:4,materi:7,memori:[2,4],simul:4,stand:1,constructor:4,natom:[1,4],discard:2,mean:2,chem_uniform:[0,1,3,4],block:[],emphasi:4,satisfi:2,digraph:2,within:[2,4,7],functional:4,automat:[],compos:4,bsd:5,test_rat:4,to_dot:4,ensur:[],chang:4,updat:5,artifici:[5,4],storag:7,your:[],per:[1,4],has_diverg:4,fast:4,span:4,log:[1,4],wai:[2,4],spam:7,properties_wnc:[],errno:[],support:[7,4],megabyt:2,iter:[2,4],has_catalysis_direct:4,custom:4,avail:5,strict:2,reli:4,interfac:2,pocoo:5,test_natoms_int:4,synthesi:4,ineffeici:[],lowest:[],testgillespi:4,form:4,catalysi:[6,4],tupl:[2,4],criteria:2,"_abcol":2,decomposit:4,link:4,catalyst:[7,4],atom:[1,4],line:[1,7,4],inlin:4,"true":[2,4],bug:[0,6],suppli:[],notat:2,made:2,get_sampl:2,consist:2,possibl:[1,4],"default":[2,7],logic:4,multidigraph_make_flow:[],maximum:[1,4],maxtim:4,intermedi:7,below:7,"__le__":4,problem:4,similar:4,connect:[],featur:[5,0,6],constant:7,evalu:4,"int":2,myp:2,"abstract":[2,4],toctre:[],life:5,repres:[7,4],strongli:[],exist:[2,4,7],file:[0,1,2,4,7],some:[5,2,4,7],subclassess:4,proport:[],check:[2,4],probabl:[1,4],again:7,quot:[2,7],test_dict:2,titl:[],has_decomposit:4,detail:[5,4],event:4,other:[5,7,4],futur:1,allreact:4,test:[5,2,4],ignor:7,you:[5,2,4],nice:[6,4],node:2,draw:2,repeat:4,intend:[1,2],stringio:4,find_cycl:[],benchmark:5,dictmixin:2,frozenbag:2,test_reaction_to_str:4,consid:[],uniformli:[2,4],debian:5,reduc:4,sphinx:5,faster:6,algorithm:[1,6,4],randomnet_test:[0,3,4],directori:[],combinations_with_replac:4,lanl:4,pseudo:1,testorderedfrozenbag:2,reactionnet_test:[0,3,4],tricki:4,emerg:5,mass:4,maxdepth:4,time:[7,4],repsent:2,escap:7,wnx:[],test_nmols_tupl:4,test_int:2},objtypes:{"0":"py:module","1":"py:method","2":"py:function","3":"py:class","4":"py:attribute","5":"py:classmethod"},titles:["Welcome to AChemKit’s documentation!","tools Package","utils Package","Project Modules","AChemKit Package","Readme","To-Dos","File Formats"],objnames:{"0":"Python module","1":"Python method","2":"Python function","3":"Python class","4":"Python attribute","5":"Python class method"},filenames:["index","AChemKit.tools","AChemKit.utils","modules","AChemKit","README","TODO","fileformats"]}) | AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/searchindex.js | searchindex.js |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Readme — AChemKit v0.1 documentation</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
VERSION: '0.1',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="top" title="AChemKit v0.1 documentation" href="index.html" />
<link rel="next" title="File Formats" href="fileformats.html" />
<link rel="prev" title="Welcome to AChemKit’s documentation!" href="index.html" />
</head>
<body>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
accesskey="I">index</a></li>
<li class="right" >
<a href="py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
<a href="fileformats.html" title="File Formats"
accesskey="N">next</a> |</li>
<li class="right" >
<a href="index.html" title="Welcome to AChemKit’s documentation!"
accesskey="P">previous</a> |</li>
<li><a href="index.html">AChemKit v0.1 documentation</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<div class="section" id="readme">
<h1>Readme<a class="headerlink" href="#readme" title="Permalink to this headline">¶</a></h1>
<div class="section" id="description">
<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
<p>PyAChemKit is a collection of Artificial Chemistry software written in Python - a library and collection of tools.</p>
<p>Artificial Chemistry (AChem) is a spin-off topic of Artificial Life. AChem is aimed at the emergence of life from a
non-living environment - primordial soup, etc.</p>
</div>
<div class="section" id="installation">
<h2>Installation<a class="headerlink" href="#installation" title="Permalink to this headline">¶</a></h2>
<p>To install on Ubuntu Linux, run</p>
<div class="highlight-python"><pre>sudo easy_install -U AChemKit</pre>
</div>
<p>This package should work on other Linux distributions and versions of Windows, but is untested.</p>
<p>This package requires the following:</p>
<ul class="simple">
<li>Python >= 2.6 <a class="reference external" href="http://www.python.org/">http://www.python.org/</a></li>
</ul>
<p>Some features use the following:</p>
<ul class="simple">
<li>NetworkX</li>
<li>GraphViz <a class="reference external" href="http://www.graphviz.org/">http://www.graphviz.org/</a></li>
</ul>
<p>Optionally, the following can be installed to improve performance:</p>
<ul class="simple">
<li>Psyco <a class="reference external" href="http://psyco.sourceforge.net">http://psyco.sourceforge.net</a></li>
<li>PyPy <a class="reference external" href="http://codespeak.net/pypy">http://codespeak.net/pypy</a></li>
</ul>
</div>
<div class="section" id="source">
<h2>Source<a class="headerlink" href="#source" title="Permalink to this headline">¶</a></h2>
<p>The latest version of the source code is available from <a class="reference external" href="https://github.com/afaulconbridge/PyAChemKit">https://github.com/afaulconbridge/PyAChemKit</a></p>
<p>The source code additionally requires the following:</p>
<ul class="simple">
<li>Sphinx >= 1.0 <a class="reference external" href="http://sphinx.pocoo.org/">http://sphinx.pocoo.org/</a></li>
<li>Graphviz <a class="reference external" href="http://www.graphviz.org/">http://www.graphviz.org/</a></li>
<li>Make <a class="reference external" href="http://www.gnu.org/software/make/">http://www.gnu.org/software/make/</a></li>
<li>LaTeX <a class="reference external" href="http://www.latex-project.org/">http://www.latex-project.org/</a></li>
<li>PyLint >=0.13.0 <a class="reference external" href="http://www.logilab.org/project/pylint/">http://www.logilab.org/project/pylint/</a></li>
<li>Coverage <a class="reference external" href="http://nedbatchelder.com/code/coverage/">http://nedbatchelder.com/code/coverage/</a></li>
</ul>
<p>For a Debian-based Linux distribution — e.g. Debian, Ubuntu — these can be installed / updated with:</p>
<div class="highlight-python"><pre>make setup</pre>
</div>
<p>(Note, LaTeX is not installed via this method because it is very large. Run <tt class="docutils literal"><span class="pre">sudo</span> <span class="pre">apt-get</span> <span class="pre">install</span> <span class="pre">texlive-full</span></tt> if you want to be able to compile the PDF documentation.)</p>
<p>There is a makefile that will run some useful tasks for you (generate documentation, test, benchmark). This can be accessed by running the following command:</p>
<div class="highlight-python"><pre>make help</pre>
</div>
</div>
<div class="section" id="copyright">
<h2>Copyright<a class="headerlink" href="#copyright" title="Permalink to this headline">¶</a></h2>
<p>This project is licensed under a modified-BSD license. See the file <tt class="docutils literal"><span class="pre">COPYRIGHT</span></tt> for details.</p>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<h3><a href="index.html">Table Of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">Readme</a><ul>
<li><a class="reference internal" href="#description">Description</a></li>
<li><a class="reference internal" href="#installation">Installation</a></li>
<li><a class="reference internal" href="#source">Source</a></li>
<li><a class="reference internal" href="#copyright">Copyright</a></li>
</ul>
</li>
</ul>
<h4>Previous topic</h4>
<p class="topless"><a href="index.html"
title="previous chapter">Welcome to AChemKit’s documentation!</a></p>
<h4>Next topic</h4>
<p class="topless"><a href="fileformats.html"
title="next chapter">File Formats</a></p>
<h3>This Page</h3>
<ul class="this-page-menu">
<li><a href="_sources/README.txt"
rel="nofollow">Show Source</a></li>
</ul>
<div id="searchbox" style="display: none">
<h3>Quick search</h3>
<form class="search" action="search.html" method="get">
<input type="text" name="q" size="18" />
<input type="submit" value="Go" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
<p class="searchtip" style="font-size: 90%">
Enter search terms or a module, class or function name.
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
>index</a></li>
<li class="right" >
<a href="py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
<a href="fileformats.html" title="File Formats"
>next</a> |</li>
<li class="right" >
<a href="index.html" title="Welcome to AChemKit’s documentation!"
>previous</a> |</li>
<li><a href="index.html">AChemKit v0.1 documentation</a> »</li>
</ul>
</div>
<div class="footer">
© Copyright 2011, Adam Faulconbridge.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.0.7.
</div>
</body>
</html> | AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/README.html | README.html |