/////////////// InitLimitedAPI /////////////// | |
/////////////// CModulePreamble /////////////// | |
// For use in DL_IMPORT/DL_EXPORT macros. | |
// CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us | |
// For the limited API it often makes sense to use Py_LIMITED_API rather than PY_VERSION_HEX | |
// when doing version checks. | |
/* For very preliminary testing purposes. Most variables are set the same as PyPy. | |
The existence of this section does not imply that anything works or is even tested */ | |
// GRAALVM_PYTHON test comes before PyPy test because GraalPython unhelpfully defines PYPY_VERSION | |
// EXPERIMENTAL !! | |
// CYTHON_CLINE_IN_TRACEBACK is currently disabled for the Limited API | |
// PyObject_CallFinalizerFromDealloc is missing and not easily replaced | |
// TODO - we could probably enable CYTHON_USE_FREELISTS by default in future since | |
// this is just a variant of cpython now, but we'd need to be very careful to make | |
// them thread safe. Since it will probably work, let the user decide. | |
// Python 3.11a2 hid _PyLong_FormatAdvancedWriter and _PyFloat_FormatAdvancedWriter | |
// therefore disable unicode writer until a better alternative appears | |
// CYTHON_AVOID_BORROWED_REFS - Avoid borrowed references and always request owned references directly instead. | |
// CYTHON_ASSUME_SAFE_MACROS - Assume that macro calls do not fail and do not raise exceptions. | |
// Py3<3.5.2 does not support _PyThreadState_UncheckedGet(). | |
// FIXME: FastGIL can probably be supported also in CPython 3.12 but needs to be adapted. | |
// CPython 3.6 introduced METH_FASTCALL but with slightly different | |
// semantics. It became stable starting from CPython 3.7. | |
// CYTHON_USE_MODULE_STATE - Use a module state/globals struct tied to the module object. | |
// EXPERIMENTAL !! | |
// Python 3.12a5 deprecated "ma_version_tag" | |
/* Whether to use METH_FASTCALL with a fake backported implementation of vectorcall */ | |
/* These short defines can easily conflict with other code */ | |
/* Compile-time sanity check that these are indeed equal. Github issue #2670. */ | |
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; | |
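// --- Illustrative sketch (editor addition, not part of the original utility code): the same
// divide-by-zero trick applied to another invariant. If the condition were false, the divisor
// would be 0 and the constant enum initializer would fail to compile, turning a wrong size
// assumption into a build error instead of silent miscompilation. The __pyx_demo_* name is made up.
enum { __pyx_demo_check_long_rank = 1 / (int)(sizeof(long) >= sizeof(int)) };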
// restrict | |
// unused attribute | |
/* for clang __has_cpp_attribute(maybe_unused) is true even before C++17 | |
* but leads to warnings with -pedantic, since it is a C++17 feature */ | |
template<class T> void CYTHON_UNUSED_VAR( const T& ) { } | |
// msvc doesn't set __cplusplus to a useful value | |
// Fallback exact-width integer typedefs. Only one of the groups below is active at a time,
// selected by compiler checks (the preprocessor conditionals are stripped in this extract).
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
// __pyx_uintptr_t: an unsigned integer type wide enough to hold a pointer; again only one
// of the following definitions applies, with the standard uintptr_t preferred when available.
typedef unsigned long long __pyx_uintptr_t;
typedef unsigned int __pyx_uintptr_t;
typedef unsigned __int64 __pyx_uintptr_t;
typedef unsigned __int32 __pyx_uintptr_t;
typedef uintptr_t __pyx_uintptr_t;
/* for clang __has_cpp_attribute(fallthrough) is true even before C++17 | |
* but leads to warnings with -pedantic, since it is a C++17 feature */ | |
template <typename T> | |
struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);}; | |
// reinterpret | |
// TODO: refactor existing code to use those macros | |
// #define __PYX_REINTERPRET_POINTER(pointer_type, pointer) ((pointer_type)(void *)(pointer)) | |
// #define __PYX_RUNTIME_REINTERPRET(type, var) (*(type *)(&var)) | |
/////////////// CInitCode /////////////// | |
// inline attribute | |
/////////////// CppInitCode /////////////// | |
// inline attribute | |
// Work around clang bug https://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor | |
template<typename T> | |
void __Pyx_call_destructor(T& x) { | |
x.~T(); | |
} | |
// Used for temporary variables of "reference" type. | |
template<typename T> | |
class __Pyx_FakeReference { | |
public: | |
__Pyx_FakeReference() : ptr(NULL) { } | |
// __Pyx_FakeReference(T& ref) : ptr(&ref) { } | |
// Const version needed as Cython doesn't know about const overloads (e.g. for stl containers). | |
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } | |
T *operator->() { return ptr; } | |
T *operator&() { return ptr; } | |
operator T&() { return *ptr; } | |
// TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed). | |
template<typename U> bool operator ==(const U& other) const { return *ptr == other; } | |
template<typename U> bool operator !=(const U& other) const { return *ptr != other; } | |
template<typename U> bool operator==(const __Pyx_FakeReference<U>& other) const { return *ptr == *other.ptr; } | |
template<typename U> bool operator!=(const __Pyx_FakeReference<U>& other) const { return *ptr != *other.ptr; } | |
private: | |
T *ptr; | |
}; | |
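// --- Illustrative sketch (editor addition): a hypothetical standalone use of
// __Pyx_FakeReference, showing that it stores a pointer, forwards member access,
// and converts back to a real T& on demand. The __pyx_demo_* names are made up.
struct __pyx_demo_point { int x; };
static void __pyx_demo_fake_reference(void) {
    __pyx_demo_point p = { 1 };
    __Pyx_FakeReference<__pyx_demo_point> ref(p);    // const overload stores &p
    ref->x = 2;                                      // operator-> forwards to p
    __pyx_demo_point &real = ref;                    // implicit conversion back to T&
    (void) real;                                     // real aliases p; p.x is now 2
}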
/////////////// PythonCompatibility /////////////// | |
// Note that the limited API doesn't know about PyCodeObject, so the type of this | |
// is PyObject (unlike for the main API) | |
static CYTHON_INLINE PyObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, | |
PyObject *code, PyObject *c, PyObject* n, PyObject *v, | |
PyObject *fv, PyObject *cell, PyObject* fn, | |
PyObject *name, int fline, PyObject *lnos) { | |
// Backout option for generating a code object. | |
// PyCode_NewEmpty isn't in the limited API. Therefore the two options are | |
// 1. Python call of the code type with a long list of positional args. | |
// 2. Generate a code object by compiling some trivial code, and customize. | |
// We use the second because it's less sensitive to changes in the code type | |
// constructor with version. | |
PyObject *exception_table = NULL; | |
PyObject *types_module=NULL, *code_type=NULL, *result=NULL; | |
PyObject *version_info; /* borrowed */ | |
PyObject *py_minor_version = NULL; | |
long minor_version = 0; | |
PyObject *type, *value, *traceback; | |
// we must be able to call this while an exception is happening - thus clear then restore the state | |
PyErr_Fetch(&type, &value, &traceback); | |
// Two alternatives follow (the selecting preprocessor check is stripped in this extract):
// either hard-code the minor version known at compile time, or query sys.version_info below.
minor_version = 11;
// we don't yet need to distinguish between versions > 11
// Note that from 3.13, when we do, we can use Py_Version
if (!(version_info = PySys_GetObject("version_info"))) goto end; | |
if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end; | |
minor_version = PyLong_AsLong(py_minor_version); | |
Py_DECREF(py_minor_version); | |
if (minor_version == -1 && PyErr_Occurred()) goto end; | |
if (!(types_module = PyImport_ImportModule("types"))) goto end; | |
if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end; | |
if (minor_version <= 7) { | |
// 3.7: | |
// code(argcount, kwonlyargcount, nlocals, stacksize, flags, codestring, | |
// constants, names, varnames, filename, name, firstlineno, | |
// lnotab[, freevars[, cellvars]]) | |
(void)p; | |
result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOO", a, k, l, s, f, code, | |
c, n, v, fn, name, fline, lnos, fv, cell); | |
} else if (minor_version <= 10) { | |
// 3.8, 3.9, 3.10 | |
// code(argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, | |
// flags, codestring, constants, names, varnames, filename, name, | |
// firstlineno, lnotab[, freevars[, cellvars]]) | |
// 3.10 switches lnotab for linetable, but is otherwise the same | |
result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOO", a,p, k, l, s, f, code, | |
c, n, v, fn, name, fline, lnos, fv, cell); | |
} else { | |
// 3.11, 3.12 | |
// code(argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, | |
// flags, codestring, constants, names, varnames, filename, name, | |
// qualname, firstlineno, linetable, exceptiontable, freevars=(), cellvars=(), /) | |
// We use name and qualname for simplicity | |
if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end; | |
result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOO", a,p, k, l, s, f, code, | |
c, n, v, fn, name, name, fline, lnos, exception_table, fv, cell); | |
} | |
end: | |
Py_XDECREF(code_type); | |
Py_XDECREF(exception_table); | |
Py_XDECREF(types_module); | |
if (type) { | |
PyErr_Restore(type, value, traceback); | |
} | |
return result; | |
} | |
// Cython uses these constants but they are not available in the limited API. | |
// (it'd be nice if there was a more robust way of looking these up) | |
static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, | |
PyObject *code, PyObject *c, PyObject* n, PyObject *v, | |
PyObject *fv, PyObject *cell, PyObject* fn, | |
PyObject *name, int fline, PyObject *lnos) { | |
// As for the earlier versions, but
// 1. pass an empty bytes string as exception_table
// 2. pass name as qualname (TODO: this might be worth implementing properly in future)
PyCodeObject *result; | |
PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); /* we don't have access to __pyx_empty_bytes here */ | |
if (!empty_bytes) return NULL; | |
result =
// Only one of the following two spellings is used: newer CPython exposes this constructor
// under the "unstable" C API tier (PyUnstable_ prefix); older versions use the plain name.
PyUnstable_Code_NewWithPosOnlyArgs
PyCode_NewWithPosOnlyArgs
(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, empty_bytes);
Py_DECREF(empty_bytes); | |
return result; | |
} | |
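// --- Illustrative sketch (editor addition): a hypothetical call site for __Pyx_PyCode_New,
// creating a mostly-empty code object of the kind used for synthetic traceback frames.
// Everything named __pyx_demo_* is made up; only the argument order follows the signatures above.
static PyObject *__pyx_demo_make_empty_code(PyObject *filename, PyObject *funcname, int py_line) {
    PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0);
    PyObject *empty_tuple = PyTuple_New(0);
    PyObject *result = NULL;
    if (empty_bytes && empty_tuple) {
        result = (PyObject *) __Pyx_PyCode_New(
            0, 0, 0, 0, 0, 0,                                    /* argcount, posonly, kwonly, nlocals, stacksize, flags */
            empty_bytes, empty_tuple, empty_tuple, empty_tuple,  /* code, consts, names, varnames */
            empty_tuple, empty_tuple, filename, funcname,        /* freevars, cellvars, filename, name */
            py_line, empty_bytes);                               /* firstlineno, linetable */
    }
    Py_XDECREF(empty_bytes);
    Py_XDECREF(empty_tuple);
    return result;
}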
// already defined for Stackless Python (all versions) and C-Python >= 3.7 | |
// value if defined: Stackless Python < 3.6: 0x80 else 0x100 | |
// new in CPython 3.6, but changed in 3.7 - see | |
// positional-only parameters: | |
// https://bugs.python.org/issue29464 | |
// const args: | |
// https://bugs.python.org/issue32240 | |
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); | |
// new in CPython 3.7, used to be old signature of _PyCFunctionFast() in 3.6 | |
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, | |
Py_ssize_t nargs, PyObject *kwnames); | |
typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, | |
size_t nargsf, PyObject *kwnames); | |
// These PyCFunction related macros get redefined in CythonFunction.c. | |
// We need our own copies because the inline functions in CPython have a type-check assert | |
// that breaks with a CyFunction in debug mode. | |
// It's probably easier for non-CPythons to support PyCFunction_GET_FUNCTION() than the object struct layout. | |
// Unused in CYTHON_COMPILING_IN_LIMITED_API. | |
static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) { | |
return (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC) ? NULL : ((PyCFunctionObject*)func)->m_self; | |
} | |
// Only used if CYTHON_COMPILING_IN_CPYTHON. | |
static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void *cfunc) { | |
// Two alternative bodies (selection stripped in this extract): the first uses the portable
// PyCFunction_GetFunction() call, the second the faster PyCFunction_GET_FUNCTION() macro.
return PyCFunction_Check(func) && PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
return PyCFunction_Check(func) && PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
} | |
// PEP-573: PyCFunction holds reference to defining class (PyCMethodObject) | |
typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); | |
//#elif PY_VERSION_HEX >= 0x03050200 | |
// Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there. | |
//#elif PY_VERSION_HEX >= 0x03050200 | |
// Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there. | |
static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) | |
{ | |
void *result; | |
result = PyModule_GetState(op); | |
if (!result) | |
Py_FatalError("Couldn't find the module state"); | |
return result; | |
} | |
// TSS (Thread Specific Storage) API | |
typedef int Py_tss_t; | |
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { | |
*key = PyThread_create_key(); | |
return 0; /* PyThread_create_key reports success always */ | |
} | |
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { | |
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); | |
*key = Py_tss_NEEDS_INIT; | |
return key; | |
} | |
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { | |
PyObject_Free(key); | |
} | |
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { | |
return *key != Py_tss_NEEDS_INIT; | |
} | |
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { | |
PyThread_delete_key(*key); | |
*key = Py_tss_NEEDS_INIT; | |
} | |
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { | |
return PyThread_set_key_value(*key, value); | |
} | |
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { | |
return PyThread_get_key_value(*key); | |
} | |
// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL) | |
// PyThread_ReInitTLS() is a no-op | |
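// --- Illustrative sketch (editor addition): a round trip through the TSS API that works both
// with the shim above and with the native Py3.7+ implementation. The __pyx_demo_* name is made up.
static int __pyx_demo_tss_roundtrip(void) {
    Py_tss_t key = Py_tss_NEEDS_INIT;
    static int value = 42;
    int found;
    if (PyThread_tss_create(&key) != 0) return -1;      /* allocate/identify the key */
    if (PyThread_tss_set(&key, &value) != 0) return -1; /* store a per-thread pointer */
    found = (PyThread_tss_get(&key) == (void *) &value);
    PyThread_tss_delete(&key);                          /* release the key again */
    return found ? 0 : -1;
}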
// The deprecation below appears in whichever attribute syntax the compiler supports
// (C++14 [[deprecated]], GCC/Clang __attribute__, or MSVC __declspec); only one is used.
[[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]]
__attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")))
__declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))
static CYTHON_INLINE int PyGILState_Check(void) { | |
// PyGILState_Check is used to decide whether to release the GIL when we don't | |
// know that we have it. For PyPy2 it isn't possible to check. | |
// Therefore assume that we don't have the GIL (which causes us not to release it, | |
// but is "safe") | |
return 0; | |
} | |
// PyPy2 >= 7.3.6 has PyGILState_Check | |
// https://stackoverflow.com/a/25666624 | |
static CYTHON_INLINE int PyGILState_Check(void) { | |
PyThreadState * tstate = _PyThreadState_Current; | |
return tstate && (tstate == PyGILState_GetThisThreadState()); | |
} | |
// _PyDict_GetItem_KnownHash() existed from CPython 3.5 to 3.12, but it was | |
// dropping exceptions in 3.5. Since 3.6, exceptions are kept. | |
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { | |
PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); | |
if (res == NULL) PyErr_Clear(); | |
return res; | |
} | |
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { | |
// This is tricky - we should return a borrowed reference but not swallow non-KeyError exceptions. 8-| | |
// But: this function is only used in Py2 and older PyPys, | |
// and currently only for argument parsing and other non-correctness-critical lookups | |
// and we know that 'name' is an interned 'str' with pre-calculated hash value (only comparisons can fail), | |
// thus, performance matters more than correctness here, especially in the "not found" case. | |
// So we ignore any exceptions in old PyPys ... | |
return PyDict_GetItem(dict, name); | |
// and hack together a stripped-down and modified PyDict_GetItem() in CPython 2. | |
PyDictEntry *ep; | |
PyDictObject *mp = (PyDictObject*) dict; | |
long hash = ((PyStringObject *) name)->ob_shash; | |
assert(hash != -1); /* hash values of interned strings are always initialised */ | |
ep = (mp->ma_lookup)(mp, name, hash); | |
if (ep == NULL) { | |
// error occurred | |
return NULL; | |
} | |
// found or not found | |
return ep->me_value; | |
} | |
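// --- Illustrative sketch (editor addition): how the two lookups above differ at a call site.
// Both return borrowed references; only the *WithError variant can leave a non-KeyError
// exception pending, so its NULL result needs PyErr_Occurred() to disambiguate.
// The __pyx_demo_* name is made up.
static PyObject *__pyx_demo_lookup(PyObject *dict, PyObject *interned_name, int *error) {
    PyObject *value = __Pyx_PyDict_GetItemStrWithError(dict, interned_name);  /* borrowed */
    *error = (value == NULL && PyErr_Occurred() != NULL);
    return value;
}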
/* Type slots */ | |
// Using PyObject_GenericSetAttr to bypass a type's immutability protection feels
// a little hacky, but it does work in the limited API.
// (It doesn't work on PyPy but that probably isn't a bug.) | |
// In Py3.8+, instances of heap types need to decref their type on deallocation. | |
// https://bugs.python.org/issue35810 | |
// __Pyx_PyUnicode_DATA() and __Pyx_PyUnicode_READ() must go together, e.g. for iteration. | |
//#define __Pyx_PyUnicode_WRITE(k, d, i, ch) /* not available */ | |
/* new Py3.3 unicode type (PEP 393) */ | |
// Py3.12 / PEP-623 removed wstr type unicode strings and all of the PyUnicode_READY() machinery. | |
// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12. | |
// https://www.python.org/dev/peps/pep-0623/ | |
// (void)(k) => avoid unused variable warning due to macro: | |
// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()". | |
// PyPy3 used to define "PyObject_Unicode" | |
// NOTE: might fail with exception => check for -1 | |
// Note that this doesn't leak a reference to whatever's at o[i] | |
static CYTHON_INLINE PyObject *__Pyx_PyImport_AddModuleRef(const char *name) { | |
PyObject *module = PyImport_AddModule(name); | |
Py_XINCREF(module); | |
return module; | |
} | |
typedef long Py_hash_t; | |
// backport of PyAsyncMethods from Py3.5 to older Py3.x versions | |
// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5 | |
typedef struct { | |
unaryfunc am_await; | |
unaryfunc am_aiter; | |
unaryfunc am_anext; | |
} __Pyx_PyAsyncMethodsStruct; | |
/////////////// IncludeStructmemberH.proto /////////////// | |
/////////////// SmallCodeConfig.proto /////////////// | |
/////////////// PyModInitFuncType.proto /////////////// | |
// Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
// Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
/////////////// FastTypeChecks.proto /////////////// | |
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);/*proto*/ | |
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);/*proto*/ | |
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);/*proto*/ | |
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);/*proto*/ | |
/////////////// FastTypeChecks /////////////// | |
//@requires: Exceptions.c::PyThreadStateGet | |
//@requires: Exceptions.c::PyErrFetchRestore | |
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { | |
while (a) { | |
a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); | |
if (a == b) | |
return 1; | |
} | |
return b == &PyBaseObject_Type; | |
} | |
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { | |
PyObject *mro; | |
if (a == b) return 1; | |
mro = a->tp_mro; | |
if (likely(mro)) { | |
Py_ssize_t i, n; | |
n = PyTuple_GET_SIZE(mro); | |
for (i = 0; i < n; i++) { | |
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) | |
return 1; | |
} | |
return 0; | |
} | |
// should only get here for incompletely initialised types, i.e. never under normal usage patterns | |
return __Pyx_InBases(a, b); | |
} | |
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { | |
PyObject *mro; | |
if (cls == a || cls == b) return 1; | |
mro = cls->tp_mro; | |
if (likely(mro)) { | |
Py_ssize_t i, n; | |
n = PyTuple_GET_SIZE(mro); | |
for (i = 0; i < n; i++) { | |
PyObject *base = PyTuple_GET_ITEM(mro, i); | |
if (base == (PyObject *)a || base == (PyObject *)b) | |
return 1; | |
} | |
return 0; | |
} | |
// should only get here for incompletely initialised types, i.e. never under normal usage patterns | |
return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); | |
} | |
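// --- Illustrative sketch (editor addition): for a fully initialised type the MRO tuple is
// scanned directly, so the built-in bool/int pair resolves without calling into Python.
// The __pyx_demo_* name is made up.
static void __pyx_demo_is_subtype(void) {
    int r1 = __Pyx_IsSubtype(&PyBool_Type, &PyLong_Type);  /* 1: int appears in bool's MRO */
    int r2 = __Pyx_IsSubtype(&PyLong_Type, &PyBool_Type);  /* 0: int is not a subtype of bool */
    (void) r1; (void) r2;
}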
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { | |
// PyObject_IsSubclass() can recurse and therefore is not safe | |
PyObject *exception, *value, *tb; | |
int res; | |
__Pyx_PyThreadState_declare | |
__Pyx_PyThreadState_assign | |
__Pyx_ErrFetch(&exception, &value, &tb); | |
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; | |
// This function must not fail, so print the error here (which also clears it) | |
if (unlikely(res == -1)) { | |
PyErr_WriteUnraisable(err); | |
res = 0; | |
} | |
if (!res) { | |
res = PyObject_IsSubclass(err, exc_type2); | |
// This function must not fail, so print the error here (which also clears it) | |
if (unlikely(res == -1)) { | |
PyErr_WriteUnraisable(err); | |
res = 0; | |
} | |
} | |
__Pyx_ErrRestore(exception, value, tb); | |
return res; | |
} | |
// Alternative definition (selection stripped in this extract): when the exception types are
// known to be real type objects, the MRO-based fast checks above replace PyObject_IsSubclass().
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
if (exc_type1) { | |
return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); | |
} else { | |
return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); | |
} | |
} | |
// so far, we only call PyErr_GivenExceptionMatches() with an exception type (not instance) as first argument | |
// => optimise for that case | |
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { | |
Py_ssize_t i, n; | |
assert(PyExceptionClass_Check(exc_type)); | |
n = PyTuple_GET_SIZE(tuple); | |
// the tighter subtype checking in Py3 allows faster out-of-order comparison | |
for (i=0; i<n; i++) { | |
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; | |
} | |
for (i=0; i<n; i++) { | |
PyObject *t = PyTuple_GET_ITEM(tuple, i); | |
if (likely(exc_type == t)) return 1; | |
if (likely(PyExceptionClass_Check(t))) { | |
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; | |
} else { | |
// FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed"); | |
} | |
} | |
return 0; | |
} | |
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { | |
if (likely(err == exc_type)) return 1; | |
if (likely(PyExceptionClass_Check(err))) { | |
if (likely(PyExceptionClass_Check(exc_type))) { | |
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); | |
} else if (likely(PyTuple_Check(exc_type))) { | |
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); | |
} else { | |
// FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed"); | |
} | |
} | |
return PyErr_GivenExceptionMatches(err, exc_type); | |
} | |
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { | |
// Only used internally with known exception types => pure safety check assertions. | |
assert(PyExceptionClass_Check(exc_type1)); | |
assert(PyExceptionClass_Check(exc_type2)); | |
if (likely(err == exc_type1 || err == exc_type2)) return 1; | |
if (likely(PyExceptionClass_Check(err))) { | |
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); | |
} | |
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); | |
} | |
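// --- Illustrative sketch (editor addition): roughly how an "except (TypeError, ValueError):"
// clause maps onto the helpers above, given the currently raised exception type in exc_type.
// The __pyx_demo_* name is made up.
static int __pyx_demo_exception_matches(PyObject *exc_type) {
    return __Pyx_PyErr_GivenExceptionMatches2(exc_type, PyExc_TypeError, PyExc_ValueError);
}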
/////////////// MathInitCode /////////////// | |
static CYTHON_INLINE float __PYX_NAN() { | |
// Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and | |
// a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is | |
// a quiet NaN. | |
float value; | |
memset(&value, 0xFF, sizeof(value)); | |
return value; | |
} | |
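// --- Illustrative sketch (editor addition): the all-ones byte pattern written above has
// sign=1, an all-ones exponent and a nonzero mantissa, which IEEE 754 classifies as a (quiet)
// NaN, and NaN is the only value that compares unequal to itself. The __pyx_demo_* name is made up.
static int __pyx_demo_nan_check(void) {
    float f = __PYX_NAN();
    return f != f;  /* 1 for a genuine NaN */
}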
/////////////// UtilityFunctionPredeclarations.proto /////////////// | |
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; | |
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ | |
/////////////// ForceInitThreads.proto /////////////// | |
//@proto_block: utility_code_proto_before_types | |
/////////////// InitThreads.init /////////////// | |
PyEval_InitThreads(); | |
/////////////// ModuleCreationPEP489 /////////////// | |
//@substitute: naming | |
//#if CYTHON_PEP489_MULTI_PHASE_INIT | |
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
// Two mutually exclusive variants follow (the selecting preprocessor checks are stripped in
// this extract); both fall through to the shared error block below on an interpreter change.
// Variant 1: compare interpreter IDs (PyInterpreterState_GetID, available since Py3.7).
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
// Variant 2: fall back to comparing the interpreter state pointers directly.
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
{ | |
PyErr_SetString( | |
PyExc_ImportError, | |
"Interpreter change detected - this module can only be loaded into one interpreter per process."); | |
return -1; | |
} | |
return 0; | |
} | |
// Two alternative prototypes (selection stripped in this extract): the attribute is either set
// on the module object itself or written into the module dict fetched further below.
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none)
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none)
{ | |
PyObject *value = PyObject_GetAttrString(spec, from_name); | |
int result = 0; | |
if (likely(value)) { | |
if (allow_none || value != Py_None) { | |
// Alternative implementations matching the two prototypes above.
result = PyModule_AddObject(module, to_name, value);
result = PyDict_SetItemString(moddict, to_name, value);
} | |
Py_DECREF(value); | |
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { | |
PyErr_Clear(); | |
} else { | |
result = -1; | |
} | |
return result; | |
} | |
static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, PyModuleDef *def) { | |
PyObject *module = NULL, *moddict, *modname; | |
CYTHON_UNUSED_VAR(def); | |
// For now, we only have exactly one module instance. | |
if (__Pyx_check_single_interpreter()) | |
return NULL; | |
if (${module_cname}) | |
return __Pyx_NewRef(${module_cname}); | |
modname = PyObject_GetAttrString(spec, "name"); | |
if (unlikely(!modname)) goto bad; | |
module = PyModule_NewObject(modname); | |
Py_DECREF(modname); | |
if (unlikely(!module)) goto bad; | |
// Alternatives: use the module object itself, or its dict (a borrowed reference).
moddict = module;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad; | |
// moddict is a borrowed reference | |
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; | |
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; | |
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; | |
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; | |
return module; | |
bad: | |
Py_XDECREF(module); | |
return NULL; | |
} | |
//#endif | |
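// --- Illustrative sketch (editor addition): how a create function like the one above is
// typically wired into multi-phase initialisation via PEP 489 module slots. The __pyx_demo_*
// names are made up; only Py_mod_create/Py_mod_exec and the slot struct are standard CPython API.
static int __pyx_demo_mod_exec(PyObject *module);  /* hypothetical exec-phase function */
static PyModuleDef_Slot __pyx_demo_mod_slots[] = {
    {Py_mod_create, (void *) ${pymodule_create_func_cname}},
    {Py_mod_exec,   (void *) __pyx_demo_mod_exec},
    {0, NULL}
};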
/////////////// CodeObjectCache.proto /////////////// | |
typedef struct { | |
PyCodeObject* code_object; | |
int code_line; | |
} __Pyx_CodeObjectCacheEntry; | |
struct __Pyx_CodeObjectCache { | |
int count; | |
int max_count; | |
__Pyx_CodeObjectCacheEntry* entries; | |
}; | |
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; | |
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); | |
static PyCodeObject *__pyx_find_code_object(int code_line); | |
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); | |
/////////////// CodeObjectCache /////////////// | |
// Note that errors are simply ignored in the code below. | |
// This is just a cache, if a lookup or insertion fails - so what? | |
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { | |
int start = 0, mid = 0, end = count - 1; | |
if (end >= 0 && code_line > entries[end].code_line) { | |
return count; | |
} | |
while (start < end) { | |
mid = start + (end - start) / 2; | |
if (code_line < entries[mid].code_line) { | |
end = mid; | |
} else if (code_line > entries[mid].code_line) { | |
start = mid + 1; | |
} else { | |
return mid; | |
} | |
} | |
if (code_line <= entries[mid].code_line) { | |
return mid; | |
} else { | |
return mid + 1; | |
} | |
} | |
static PyCodeObject *__pyx_find_code_object(int code_line) { | |
PyCodeObject* code_object; | |
int pos; | |
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { | |
return NULL; | |
} | |
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); | |
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { | |
return NULL; | |
} | |
code_object = __pyx_code_cache.entries[pos].code_object; | |
Py_INCREF(code_object); | |
return code_object; | |
} | |
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { | |
int pos, i; | |
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; | |
if (unlikely(!code_line)) { | |
return; | |
} | |
if (unlikely(!entries)) { | |
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); | |
if (likely(entries)) { | |
__pyx_code_cache.entries = entries; | |
__pyx_code_cache.max_count = 64; | |
__pyx_code_cache.count = 1; | |
entries[0].code_line = code_line; | |
entries[0].code_object = code_object; | |
Py_INCREF(code_object); | |
} | |
return; | |
} | |
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); | |
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { | |
PyCodeObject* tmp = entries[pos].code_object; | |
entries[pos].code_object = code_object; | |
Py_DECREF(tmp); | |
return; | |
} | |
if (__pyx_code_cache.count == __pyx_code_cache.max_count) { | |
int new_max = __pyx_code_cache.max_count + 64; | |
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( | |
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); | |
if (unlikely(!entries)) { | |
return; | |
} | |
__pyx_code_cache.entries = entries; | |
__pyx_code_cache.max_count = new_max; | |
} | |
for (i=__pyx_code_cache.count; i>pos; i--) { | |
entries[i] = entries[i-1]; | |
} | |
entries[pos].code_line = code_line; | |
entries[pos].code_object = code_object; | |
__pyx_code_cache.count++; | |
Py_INCREF(code_object); | |
} | |
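// --- Illustrative sketch (editor addition): the intended lookup-then-insert pattern around
// the cache above, returning a new reference in both branches. The __pyx_demo_* name is made up.
static PyCodeObject *__pyx_demo_get_code_for_line(int code_line, PyCodeObject *freshly_built) {
    PyCodeObject *cached = __pyx_find_code_object(code_line);  /* new reference or NULL */
    if (cached) return cached;
    __pyx_insert_code_object(code_line, freshly_built);        /* cache takes its own reference */
    Py_INCREF(freshly_built);
    return freshly_built;
}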
/////////////// CodeObjectCache.cleanup /////////////// | |
if (__pyx_code_cache.entries) { | |
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; | |
int i, count = __pyx_code_cache.count; | |
__pyx_code_cache.count = 0; | |
__pyx_code_cache.max_count = 0; | |
__pyx_code_cache.entries = NULL; | |
for (i=0; i<count; i++) { | |
Py_DECREF(entries[i].code_object); | |
} | |
PyMem_Free(entries); | |
} | |
/////////////// CheckBinaryVersion.proto /////////////// | |
static unsigned long __Pyx_get_runtime_version(void); | |
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer); | |
/////////////// CheckBinaryVersion /////////////// | |
static unsigned long __Pyx_get_runtime_version(void) {
// Two alternative implementations follow (selection stripped in this extract).
// Py3.11+: Py_Version already uses the PY_VERSION_HEX layout; we will probably never need
// the alpha/beta status, so mask it out rather than parse it.
return Py_Version & ~0xFFUL;
// Fallback: parse the "major.minor.micro" prefix of Py_GetVersion() into the same layout.
const char* rt_version = Py_GetVersion();
unsigned long version = 0; | |
unsigned long factor = 0x01000000UL; | |
unsigned int digit = 0; | |
int i = 0; | |
while (factor) { | |
while ('0' <= rt_version[i] && rt_version[i] <= '9') { | |
digit = digit * 10 + (unsigned int) (rt_version[i] - '0'); | |
++i; | |
} | |
version += factor * digit; | |
if (rt_version[i] != '.') | |
break; | |
digit = 0; | |
factor >>= 8; | |
++i; | |
} | |
return version; | |
} | |
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) {
// Returns 0 if the major.minor versions match, 1 if a newer runtime is accepted, and otherwise
// the result of warning about the mismatch (0, or -1 if the warning was raised as an error).
const unsigned long MAJOR_MINOR = 0xFFFF0000UL; | |
if ((rt_version & MAJOR_MINOR) == (ct_version & MAJOR_MINOR)) | |
return 0; | |
if (likely(allow_newer && (rt_version & MAJOR_MINOR) > (ct_version & MAJOR_MINOR))) | |
return 1; | |
{ | |
char message[200]; | |
PyOS_snprintf(message, sizeof(message), | |
"compile time Python version %d.%d " | |
"of module '%.100s' " | |
"%s " | |
"runtime version %d.%d", | |
(int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF), | |
__Pyx_MODULE_NAME, | |
(allow_newer) ? "was newer than" : "does not match", | |
(int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF) | |
); | |
// returns 0 or -1 | |
return PyErr_WarnEx(NULL, message, 1); | |
} | |
} | |
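// --- Illustrative sketch (editor addition): ct_version uses the PY_VERSION_HEX layout
// (major in bits 24-31, minor in bits 16-23), e.g. 3.11.0 final == 0x030B00F0, so a module
// built against 3.11 could check the interpreter it is being loaded into like this.
// The __pyx_demo_* name is made up.
static int __pyx_demo_version_check(void) {
    return __Pyx_check_binary_version(0x030B00F0UL, __Pyx_get_runtime_version(), 1);
}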
/////////////// IsLittleEndian.proto /////////////// | |
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); | |
/////////////// IsLittleEndian /////////////// | |
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) | |
{ | |
union { | |
uint32_t u32; | |
uint8_t u8[4]; | |
} S; | |
S.u32 = 0x01020304; | |
return S.u8[0] == 4; | |
} | |
/////////////// Refnanny.proto /////////////// | |
typedef struct { | |
void (*INCREF)(void*, PyObject*, Py_ssize_t); | |
void (*DECREF)(void*, PyObject*, Py_ssize_t); | |
void (*GOTREF)(void*, PyObject*, Py_ssize_t); | |
void (*GIVEREF)(void*, PyObject*, Py_ssize_t); | |
void* (*SetupContext)(const char*, Py_ssize_t, const char*); | |
void (*FinishContext)(void**); | |
} __Pyx_RefNannyAPIStruct; | |
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; | |
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ | |
/////////////// Refnanny /////////////// | |
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { | |
PyObject *m = NULL, *p = NULL; | |
void *r = NULL; | |
m = PyImport_ImportModule(modname); | |
if (!m) goto end; | |
p = PyObject_GetAttrString(m, "RefNannyAPI"); | |
if (!p) goto end; | |
r = PyLong_AsVoidPtr(p); | |
end: | |
Py_XDECREF(p); | |
Py_XDECREF(m); | |
return (__Pyx_RefNannyAPIStruct *)r; | |
} | |
/////////////// ImportRefnannyAPI /////////////// | |
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); | |
if (!__Pyx_RefNanny) { | |
PyErr_Clear(); | |
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); | |
if (!__Pyx_RefNanny) | |
Py_FatalError("failed to import 'refnanny' module"); | |
} | |
/////////////// RegisterModuleCleanup.proto /////////////// | |
//@substitute: naming | |
static void ${cleanup_cname}(PyObject *self); /*proto*/ | |
static int __Pyx_RegisterCleanup(void); /*proto*/ | |
/////////////// RegisterModuleCleanup /////////////// | |
//@substitute: naming | |
static PyObject* ${cleanup_cname}_atexit(PyObject *module, PyObject *unused) { | |
CYTHON_UNUSED_VAR(unused); | |
${cleanup_cname}(module); | |
Py_INCREF(Py_None); return Py_None; | |
} | |
static int __Pyx_RegisterCleanup(void) { | |
// Don't use Py_AtExit because that has a 32-call limit and is called | |
// after python finalization. | |
// Also, we try to prepend the cleanup function to "atexit._exithandlers"
// in Py2 because CPython runs them last-to-first. Running last lets user
// exit code that may depend on the globals and cached objects we are about
// to clean up run before us.
static PyMethodDef cleanup_def = { | |
"__cleanup", (PyCFunction)${cleanup_cname}_atexit, METH_NOARGS, 0}; | |
PyObject *cleanup_func = 0; | |
PyObject *atexit = 0; | |
PyObject *reg = 0; | |
PyObject *args = 0; | |
PyObject *res = 0; | |
int ret = -1; | |
cleanup_func = PyCFunction_New(&cleanup_def, 0); | |
if (!cleanup_func) | |
goto bad; | |
atexit = PyImport_ImportModule("atexit"); | |
if (!atexit) | |
goto bad; | |
reg = PyObject_GetAttrString(atexit, "_exithandlers"); | |
if (reg && PyList_Check(reg)) { | |
PyObject *a, *kw; | |
a = PyTuple_New(0); | |
kw = PyDict_New(); | |
if (!a || !kw) { | |
Py_XDECREF(a); | |
Py_XDECREF(kw); | |
goto bad; | |
} | |
args = PyTuple_Pack(3, cleanup_func, a, kw); | |
Py_DECREF(a); | |
Py_DECREF(kw); | |
if (!args) | |
goto bad; | |
ret = PyList_Insert(reg, 0, args); | |
} else { | |
if (!reg) | |
PyErr_Clear(); | |
Py_XDECREF(reg); | |
reg = PyObject_GetAttrString(atexit, "register"); | |
if (!reg) | |
goto bad; | |
args = PyTuple_Pack(1, cleanup_func); | |
if (!args) | |
goto bad; | |
res = PyObject_CallObject(reg, args); | |
if (!res) | |
goto bad; | |
ret = 0; | |
} | |
bad: | |
Py_XDECREF(cleanup_func); | |
Py_XDECREF(atexit); | |
Py_XDECREF(reg); | |
Py_XDECREF(args); | |
Py_XDECREF(res); | |
return ret; | |
} | |
/////////////// FastGil.init /////////////// | |
__Pyx_FastGilFuncInit(); | |
/////////////// NoFastGil.proto /////////////// | |
//@proto_block: utility_code_proto_before_types | |
/////////////// FastGil.proto /////////////// | |
//@proto_block: utility_code_proto_before_types | |
struct __Pyx_FastGilVtab { | |
PyGILState_STATE (*Fast_PyGILState_Ensure)(void); | |
void (*Fast_PyGILState_Release)(PyGILState_STATE oldstate); | |
void (*FastGIL_Remember)(void); | |
void (*FastGIL_Forget)(void); | |
}; | |
static void __Pyx_FastGIL_Noop(void) {} | |
static struct __Pyx_FastGilVtab __Pyx_FastGilFuncs = { | |
PyGILState_Ensure, | |
PyGILState_Release, | |
__Pyx_FastGIL_Noop, | |
__Pyx_FastGIL_Noop | |
}; | |
static void __Pyx_FastGilFuncInit(void); | |
/////////////// FastGil /////////////// | |
// The implementations of PyGILState_Ensure/Release call PyThread_get_key_value
// several times, which turns out to be quite slow (slower, in fact, than
// acquiring the GIL itself). Simply storing the thread state in a thread local
// for the common case is much faster.
// To make optimal use of this thread local, we attempt to share it between | |
// modules. | |
static CYTHON_THREAD_LOCAL PyThreadState *__Pyx_FastGil_tcur = NULL; | |
static CYTHON_THREAD_LOCAL int __Pyx_FastGil_tcur_depth = 0; | |
static int __Pyx_FastGil_autoTLSkey = -1; | |
static CYTHON_INLINE void __Pyx_FastGIL_Remember0(void) { | |
++__Pyx_FastGil_tcur_depth; | |
} | |
static CYTHON_INLINE void __Pyx_FastGIL_Forget0(void) { | |
if (--__Pyx_FastGil_tcur_depth == 0) { | |
__Pyx_FastGil_tcur = NULL; | |
} | |
} | |
static CYTHON_INLINE PyThreadState *__Pyx_FastGil_get_tcur(void) { | |
PyThreadState *tcur = __Pyx_FastGil_tcur; | |
if (tcur == NULL) { | |
tcur = __Pyx_FastGil_tcur = (PyThreadState*)PyThread_get_key_value(__Pyx_FastGil_autoTLSkey); | |
} | |
return tcur; | |
} | |
static PyGILState_STATE __Pyx_FastGil_PyGILState_Ensure(void) { | |
int current; | |
PyThreadState *tcur; | |
__Pyx_FastGIL_Remember0(); | |
tcur = __Pyx_FastGil_get_tcur(); | |
if (tcur == NULL) { | |
// Uninitialized, need to initialize now. | |
return PyGILState_Ensure(); | |
} | |
current = tcur == __Pyx_PyThreadState_Current; | |
if (current == 0) { | |
PyEval_RestoreThread(tcur); | |
} | |
++tcur->gilstate_counter; | |
return current ? PyGILState_LOCKED : PyGILState_UNLOCKED; | |
} | |
static void __Pyx_FastGil_PyGILState_Release(PyGILState_STATE oldstate) { | |
PyThreadState *tcur = __Pyx_FastGil_get_tcur(); | |
__Pyx_FastGIL_Forget0(); | |
if (tcur->gilstate_counter == 1) { | |
// This is the last lock, do all the cleanup as well. | |
PyGILState_Release(oldstate); | |
} else { | |
--tcur->gilstate_counter; | |
if (oldstate == PyGILState_UNLOCKED) { | |
PyEval_SaveThread(); | |
} | |
} | |
} | |
static void __Pyx_FastGilFuncInit0(void) { | |
/* Try to detect autoTLSkey. */ | |
int key; | |
void* this_thread_state = (void*) PyGILState_GetThisThreadState(); | |
for (key = 0; key < 100; key++) { | |
if (PyThread_get_key_value(key) == this_thread_state) { | |
__Pyx_FastGil_autoTLSkey = key; | |
break; | |
} | |
} | |
if (__Pyx_FastGil_autoTLSkey != -1) { | |
PyObject* capsule = NULL; | |
PyObject* abi_module = NULL; | |
__Pyx_PyGILState_Ensure = __Pyx_FastGil_PyGILState_Ensure; | |
__Pyx_PyGILState_Release = __Pyx_FastGil_PyGILState_Release; | |
__Pyx_FastGIL_Remember = __Pyx_FastGIL_Remember0; | |
__Pyx_FastGIL_Forget = __Pyx_FastGIL_Forget0; | |
capsule = PyCapsule_New(&__Pyx_FastGilFuncs, __Pyx_FastGIL_PyCapsule, NULL); | |
if (capsule) { | |
abi_module = __Pyx_PyImport_AddModuleRef(__Pyx_FastGIL_ABI_module); | |
if (abi_module) { | |
PyObject_SetAttrString(abi_module, __Pyx_FastGIL_PyCapsuleName, capsule); | |
Py_DECREF(abi_module); | |
} | |
} | |
Py_XDECREF(capsule); | |
} | |
} | |
// Alternative no-op definition, used when the fast-GIL path is not available
// (the selecting preprocessor check is stripped in this extract).
static void __Pyx_FastGilFuncInit0(void) {
}
static void __Pyx_FastGilFuncInit(void) { | |
struct __Pyx_FastGilVtab* shared = (struct __Pyx_FastGilVtab*)PyCapsule_Import(__Pyx_FastGIL_PyCapsule, 1); | |
if (shared) { | |
__Pyx_FastGilFuncs = *shared; | |
} else { | |
PyErr_Clear(); | |
__Pyx_FastGilFuncInit0(); | |
} | |
} | |
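// --- Illustrative sketch (editor addition): one possible calling sequence around a nogil
// region, going through the vtab populated above. In generated code these calls are hidden
// behind macros; the __pyx_demo_* name is made up and the GIL release/re-acquire steps are
// only indicated by comments.
static void __pyx_demo_fastgil_roundtrip(void) {
    PyGILState_STATE state;
    __Pyx_FastGilFuncs.FastGIL_Remember();                /* about to release the GIL */
    /* ... GIL released (PyEval_SaveThread), pure C work ... */
    state = __Pyx_FastGilFuncs.Fast_PyGILState_Ensure();  /* fast path reuses the cached tstate */
    /* ... code that needs the GIL ... */
    __Pyx_FastGilFuncs.Fast_PyGILState_Release(state);
    /* ... GIL re-acquired (PyEval_RestoreThread) ... */
    __Pyx_FastGilFuncs.FastGIL_Forget();
}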
///////////////////// UtilityCodePragmas ///////////////////////// | |
/* Warning 4127: conditional expression is constant
* Cython uses constant conditional expressions to allow inline functions to be
* optimized at compile time, so this warning is not useful.
*/
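// --- Illustrative sketch (editor addition): with MSVC the suppression described above would
// typically be scoped like this (the matching pop belongs in the ...PragmasEnd section below).
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4127 )  /* conditional expression is constant */
#endif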
///////////////////// UtilityCodePragmasEnd ////////////////////// | |
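// --- Illustrative sketch (editor addition): the matching scope-restore for the MSVC warning
// suppression sketched in the section above.
#ifdef _MSC_VER
#pragma warning( pop )
#endif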