diff --git a/py311/lib/python3.11/site-packages/Levenshtein/StringMatcher.py b/py311/lib/python3.11/site-packages/Levenshtein/StringMatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..269b98a694fc9224ba841493ac1469d485fd6555
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/Levenshtein/StringMatcher.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from warnings import warn
+
+from Levenshtein import distance, editops, matching_blocks, opcodes, ratio
+
+
+class StringMatcher:
+    """A SequenceMatcher-like class built on top of Levenshtein"""
+
+    def _reset_cache(self):
+        # `None` marks "not computed yet"; 0 and 0.0 are valid cached
+        # values, so the getters below test `is None`, not truthiness.
+        self._ratio = self._distance = None
+        self._opcodes = self._editops = self._matching_blocks = None
+
+    def __init__(self, isjunk=None, seq1="", seq2="", autojunk=False):
+        if isjunk:
+            warn("isjunk NOT implemented, it will be ignored", stacklevel=1)
+        if autojunk:
+            warn("autojunk NOT implemented, it will be ignored", stacklevel=1)
+        self._str1, self._str2 = seq1, seq2
+        self._reset_cache()
+
+    def set_seqs(self, seq1, seq2):
+        self._str1, self._str2 = seq1, seq2
+        self._reset_cache()
+
+    def set_seq1(self, seq1):
+        self._str1 = seq1
+        self._reset_cache()
+
+    def set_seq2(self, seq2):
+        self._str2 = seq2
+        self._reset_cache()
+
+    def get_opcodes(self):
+        if self._opcodes is None:
+            if self._editops is not None:
+                self._opcodes = opcodes(self._editops, self._str1, self._str2)
+            else:
+                self._opcodes = opcodes(self._str1, self._str2)
+        return self._opcodes
+
+    def get_editops(self):
+        if self._editops is None:
+            if self._opcodes is not None:
+                self._editops = editops(self._opcodes, self._str1, self._str2)
+            else:
+                self._editops = editops(self._str1, self._str2)
+        return self._editops
+
+    def get_matching_blocks(self):
+        if self._matching_blocks is None:
+            self._matching_blocks = matching_blocks(self.get_opcodes(), self._str1, self._str2)
+        return self._matching_blocks
+
+    def ratio(self):
+        if self._ratio is None:
+            self._ratio = ratio(self._str1, self._str2)
+        return self._ratio
+
+    def quick_ratio(self):
+        # This is usually quick enough :o)
+        if self._ratio is None:
+            self._ratio = ratio(self._str1, self._str2)
+        return self._ratio
+
+    def real_quick_ratio(self):
+        len1, len2 = len(self._str1), len(self._str2)
+        if not len1 and not len2:
+            # Two empty sequences are identical; avoid dividing by zero.
+            return 1.0
+        return 2.0 * min(len1, len2) / (len1 + len2)
+
+    def distance(self):
+        if self._distance is None:
+            self._distance = distance(self._str1, self._str2)
+        return self._distance
diff --git a/py311/lib/python3.11/site-packages/Levenshtein/__init__.py b/py311/lib/python3.11/site-packages/Levenshtein/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..04a929e006f22f2ccdf5efc2833b53d0e5671631
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/Levenshtein/__init__.py
@@ -0,0 +1,566 @@
+"""
+A C extension module for fast computation of:
+- Levenshtein (edit) distance and edit sequence manipulation
+- string similarity
+- approximate median strings, and generally string averaging
+- string sequence and set similarity
+
+Levenshtein has some overlap with difflib (SequenceMatcher). It
+supports only strings, not arbitrary sequence types, but on the
+other hand it's much faster.
+
+It supports both normal and Unicode strings, but can't mix them: all
+arguments to a function (method) have to be of the same type (or its
+subclasses).
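+
+A minimal doctest-style sketch of the two core helpers; the values
+follow from the 'spam' -> 'park' editops example documented later in
+this module (three edit operations; one matching block of length 2):
+
+>>> from Levenshtein import distance, ratio
+>>> distance('spam', 'park')
+3
+>>> ratio('spam', 'park')
+0.5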
+""" + +from __future__ import annotations + +__author__: str = "Max Bachmann" +__license__: str = "GPL" +__version__: str = "0.27.3" + +import rapidfuzz.distance.Hamming as _Hamming +import rapidfuzz.distance.Indel as _Indel +import rapidfuzz.distance.Jaro as _Jaro +import rapidfuzz.distance.JaroWinkler as _JaroWinkler +import rapidfuzz.distance.Levenshtein as _Levenshtein +from rapidfuzz.distance import ( + Editops as _Editops, +) +from rapidfuzz.distance import ( + Opcodes as _Opcodes, +) + +from Levenshtein.levenshtein_cpp import ( + median, + median_improve, + quickmedian, + seqratio, + setmedian, + setratio, +) + +__all__ = [ + "quickmedian", + "median", + "median_improve", + "setmedian", + "setratio", + "seqratio", + "distance", + "ratio", + "hamming", + "jaro", + "jaro_winkler", + "editops", + "opcodes", + "matching_blocks", + "apply_edit", + "subtract_edit", + "inverse", +] + + +def distance(s1, s2, *, weights=(1, 1, 1), processor=None, score_cutoff=None, score_hint=None): + """ + Calculates the minimum number of insertions, deletions, and substitutions + required to change one sequence into the other according to Levenshtein with custom + costs for insertion, deletion and substitution + + Parameters + ---------- + s1 : Sequence[Hashable] + First string to compare. + s2 : Sequence[Hashable] + Second string to compare. + weights : Tuple[int, int, int] or None, optional + The weights for the three operations in the form + (insertion, deletion, substitution). Default is (1, 1, 1), + which gives all three operations a weight of 1. + processor: callable, optional + Optional callable that is used to preprocess the strings before + comparing them. Default is None, which deactivates this behaviour. + score_cutoff : int, optional + Maximum distance between s1 and s2, that is + considered as a result. If the distance is bigger than score_cutoff, + score_cutoff + 1 is returned instead. Default is None, which deactivates + this behaviour. + score_hint : int, optional + Expected distance between s1 and s2. This is used to select a + faster implementation. Default is None, which deactivates this behaviour. + + Returns + ------- + distance : int + distance between s1 and s2 + + Raises + ------ + ValueError + If unsupported weights are provided a ValueError is thrown + + Examples + -------- + Find the Levenshtein distance between two strings: + + >>> from Levenshtein import distance + >>> distance("lewenstein", "levenshtein") + 2 + + Setting a maximum distance allows the implementation to select + a more efficient implementation: + + >>> distance("lewenstein", "levenshtein", score_cutoff=1) + 2 + + It is possible to select different weights by passing a `weight` + tuple. + + >>> distance("lewenstein", "levenshtein", weights=(1,1,2)) + 3 + """ + return _Levenshtein.distance( + s1, + s2, + weights=weights, + processor=processor, + score_cutoff=score_cutoff, + score_hint=score_hint, + ) + + +def ratio(s1, s2, *, processor=None, score_cutoff=None): + """ + Calculates a normalized indel similarity in the range [0, 1]. + The indel distance calculates the minimum number of insertions and deletions + required to change one sequence into the other. + + This is calculated as ``1 - (distance / (len1 + len2))`` + + Parameters + ---------- + s1 : Sequence[Hashable] + First string to compare. + s2 : Sequence[Hashable] + Second string to compare. + processor: callable, optional + Optional callable that is used to preprocess the strings before + comparing them. Default is None, which deactivates this behaviour. 
+    score_cutoff : float, optional
+        Optional argument for a score threshold as a float between 0 and 1.0.
+        For norm_sim < score_cutoff 0 is returned instead. Default is None,
+        which deactivates this behaviour.
+
+    Returns
+    -------
+    norm_sim : float
+        normalized similarity between s1 and s2 as a float between 0 and 1.0
+
+    Examples
+    --------
+    Find the normalized Indel similarity between two strings:
+
+    >>> from Levenshtein import ratio
+    >>> ratio("lewenstein", "levenshtein")
+    0.8571428571428572
+
+    Setting a score_cutoff allows the library to choose
+    a more efficient implementation:
+
+    >>> ratio("lewenstein", "levenshtein", score_cutoff=0.9)
+    0.0
+
+    When a different processor is used, s1 and s2 do not have to be strings
+
+    >>> ratio(["lewenstein"], ["levenshtein"], processor=lambda s: s[0])
+    0.8571428571428572
+    """
+    return _Indel.normalized_similarity(s1, s2, processor=processor, score_cutoff=score_cutoff)
+
+
+def hamming(s1, s2, *, pad=True, processor=None, score_cutoff=None):
+    """
+    Calculates the Hamming distance between two strings.
+    The Hamming distance is defined as the number of positions
+    where the two strings differ. It describes the minimum
+    number of substitutions required to transform s1 into s2.
+
+    Parameters
+    ----------
+    s1 : Sequence[Hashable]
+        First string to compare.
+    s2 : Sequence[Hashable]
+        Second string to compare.
+    pad : bool, optional
+        should strings be padded if there is a length difference.
+        If pad is False and strings have a different length
+        a ValueError is thrown instead. Default is True.
+    processor: callable, optional
+        Optional callable that is used to preprocess the strings before
+        comparing them. Default is None, which deactivates this behaviour.
+    score_cutoff : int or None, optional
+        Maximum distance between s1 and s2 that is considered a result.
+        If the distance is bigger than score_cutoff,
+        score_cutoff + 1 is returned instead. Default is None, which deactivates
+        this behaviour.
+
+    Returns
+    -------
+    distance : int
+        distance between s1 and s2
+
+    Raises
+    ------
+    ValueError
+        If s1 and s2 have a different length and pad is False
+    """
+    return _Hamming.distance(s1, s2, pad=pad, processor=processor, score_cutoff=score_cutoff)
+
+
+def jaro(s1, s2, *, processor=None, score_cutoff=None) -> float:
+    """
+    Calculates the Jaro similarity.
+
+    Parameters
+    ----------
+    s1 : Sequence[Hashable]
+        First string to compare.
+    s2 : Sequence[Hashable]
+        Second string to compare.
+    processor: callable, optional
+        Optional callable that is used to preprocess the strings before
+        comparing them. Default is None, which deactivates this behaviour.
+    score_cutoff : float, optional
+        Optional argument for a score threshold as a float between 0 and 1.0.
+        For ratio < score_cutoff 0 is returned instead. Default is None,
+        which deactivates this behaviour.
+
+    Returns
+    -------
+    similarity : float
+        similarity between s1 and s2 as a float between 0 and 1.0
+    """
+    return _Jaro.similarity(s1, s2, processor=processor, score_cutoff=score_cutoff)
+
+
+def jaro_winkler(s1, s2, *, prefix_weight=0.1, processor=None, score_cutoff=None) -> float:
+    """
+    Calculates the Jaro-Winkler similarity.
+
+    Parameters
+    ----------
+    s1 : Sequence[Hashable]
+        First string to compare.
+    s2 : Sequence[Hashable]
+        Second string to compare.
+    prefix_weight : float, optional
+        Weight used for the common prefix of the two strings.
+        Has to be between 0 and 0.25. Default is 0.1.
+    processor: callable, optional
+        Optional callable that is used to preprocess the strings before
+        comparing them. Default is None, which deactivates this behaviour.
+    score_cutoff : float, optional
+        Optional argument for a score threshold as a float between 0 and 1.0.
+        For ratio < score_cutoff 0 is returned instead. Default is None,
+        which deactivates this behaviour.
+
+    Returns
+    -------
+    similarity : float
+        similarity between s1 and s2 as a float between 0 and 1.0
+
+    Raises
+    ------
+    ValueError
+        If prefix_weight is invalid
+    """
+    return _JaroWinkler.similarity(
+        s1,
+        s2,
+        prefix_weight=prefix_weight,
+        processor=processor,
+        score_cutoff=score_cutoff,
+    )
+
+
+# Assign attributes to the functions. This allows rapidfuzz to call them more
+# efficiently. We can't directly copy the functions and replace the docstrings,
+# since this leads to crashes on PyPy.
+distance._RF_OriginalScorer = distance
+ratio._RF_OriginalScorer = ratio
+hamming._RF_OriginalScorer = hamming
+jaro._RF_OriginalScorer = jaro
+jaro_winkler._RF_OriginalScorer = jaro_winkler
+
+distance._RF_ScorerPy = _Levenshtein.distance._RF_ScorerPy
+ratio._RF_ScorerPy = _Indel.normalized_similarity._RF_ScorerPy
+hamming._RF_ScorerPy = _Hamming.distance._RF_ScorerPy
+jaro._RF_ScorerPy = _Jaro.similarity._RF_ScorerPy
+jaro_winkler._RF_ScorerPy = _JaroWinkler.similarity._RF_ScorerPy
+
+if hasattr(_Levenshtein.distance, "_RF_Scorer"):
+    distance._RF_Scorer = _Levenshtein.distance._RF_Scorer
+if hasattr(_Indel.normalized_similarity, "_RF_Scorer"):
+    ratio._RF_Scorer = _Indel.normalized_similarity._RF_Scorer
+if hasattr(_Hamming.distance, "_RF_Scorer"):
+    hamming._RF_Scorer = _Hamming.distance._RF_Scorer
+if hasattr(_Jaro.similarity, "_RF_Scorer"):
+    jaro._RF_Scorer = _Jaro.similarity._RF_Scorer
+if hasattr(_JaroWinkler.similarity, "_RF_Scorer"):
+    jaro_winkler._RF_Scorer = _JaroWinkler.similarity._RF_Scorer
+
+
+def editops(*args):
+    """
+    Find the sequence of edit operations transforming one string to another.
+
+    editops(source_string, destination_string)
+    editops(edit_operations, source_length, destination_length)
+
+    The result is a list of triples (operation, spos, dpos), where
+    operation is one of 'equal', 'replace', 'insert', or 'delete'; spos
+    and dpos are positions of characters in the first (source) and the
+    second (destination) strings. These are operations on single
+    characters. In fact the returned list doesn't contain the 'equal'
+    operations, but all the related functions accept lists both with and
+    without them.
+
+    Examples
+    --------
+    >>> editops('spam', 'park')
+    [('delete', 0, 0), ('insert', 3, 2), ('replace', 3, 3)]
+
+    The alternate form editops(opcodes, source_string, destination_string)
+    can be used for conversion from opcodes (5-tuples) to editops (you can
+    pass strings or their lengths, it doesn't matter).
+    """
+    # convert: we were called as (opcodes, s1, s2)
+    if len(args) == 3:
+        arg1, arg2, arg3 = args
+        len1 = arg2 if isinstance(arg2, int) else len(arg2)
+        len2 = arg3 if isinstance(arg3, int) else len(arg3)
+        return _Editops(arg1, len1, len2).as_list()
+
+    # find editops: we were called as (s1, s2)
+    arg1, arg2 = args
+    return _Levenshtein.editops(arg1, arg2).as_list()
+
+
+def opcodes(*args):
+    """
+    Find the sequence of edit operations transforming one string to another.
+
+    opcodes(source_string, destination_string)
+    opcodes(edit_operations, source_length, destination_length)
+
+    The result is a list of 5-tuples with the same meaning as in
+    SequenceMatcher's get_opcodes() output.
+    But since the algorithms differ, the actual sequences from
+    Levenshtein and SequenceMatcher may differ too.
+
+    Examples
+    --------
+    >>> for x in opcodes('spam', 'park'):
+    ...     print(x)
+    ...
+    ('delete', 0, 1, 0, 0)
+    ('equal', 1, 3, 0, 2)
+    ('insert', 3, 3, 2, 3)
+    ('replace', 3, 4, 3, 4)
+
+    The alternate form opcodes(editops, source_string, destination_string)
+    can be used for conversion from editops (triples) to opcodes (you can
+    pass strings or their lengths, it doesn't matter).
+    """
+    # convert: we were called as (editops, s1, s2)
+    if len(args) == 3:
+        arg1, arg2, arg3 = args
+        len1 = arg2 if isinstance(arg2, int) else len(arg2)
+        len2 = arg3 if isinstance(arg3, int) else len(arg3)
+        return _Opcodes(arg1, len1, len2).as_list()
+
+    # find opcodes: we were called as (s1, s2)
+    arg1, arg2 = args
+    return _Levenshtein.opcodes(arg1, arg2).as_list()
+
+
+def matching_blocks(edit_operations, source_string, destination_string):
+    """
+    Find identical blocks in two strings.
+
+    Parameters
+    ----------
+    edit_operations : list
+        editops or opcodes created for the source and destination string
+    source_string : str | int
+        source string or the length of the source string
+    destination_string : str | int
+        destination string or the length of the destination string
+
+    Returns
+    -------
+    matching_blocks : list
+        List of triples with the same meaning as in SequenceMatcher's
+        get_matching_blocks() output.
+
+    Examples
+    --------
+    >>> a, b = 'spam', 'park'
+    >>> matching_blocks(editops(a, b), a, b)
+    [(1, 0, 2), (4, 4, 0)]
+    >>> matching_blocks(editops(a, b), len(a), len(b))
+    [(1, 0, 2), (4, 4, 0)]
+
+    The last zero-length block is not an error; it is there for
+    compatibility with difflib, which always emits it.
+
+    One can join the matching blocks to get two identical strings:
+
+    >>> a, b = 'dog kennels', 'mattresses'
+    >>> mb = matching_blocks(editops(a,b), a, b)
+    >>> ''.join([a[x[0]:x[0]+x[2]] for x in mb])
+    'ees'
+    >>> ''.join([b[x[1]:x[1]+x[2]] for x in mb])
+    'ees'
+    """
+    len1 = source_string if isinstance(source_string, int) else len(source_string)
+    len2 = destination_string if isinstance(destination_string, int) else len(destination_string)
+
+    if not edit_operations or len(edit_operations[0]) == 3:
+        return _Editops(edit_operations, len1, len2).as_matching_blocks()
+
+    return _Opcodes(edit_operations, len1, len2).as_matching_blocks()
+
+
+def apply_edit(edit_operations, source_string, destination_string):
+    """
+    Apply a sequence of edit operations to a string.
+
+    apply_edit(edit_operations, source_string, destination_string)
+
+    In the case of editops, the sequence can be an arbitrary ordered subset
+    of the edit sequence transforming source_string to destination_string.
+
+    Examples
+    --------
+    >>> e = editops('man', 'scotsman')
+    >>> apply_edit(e, 'man', 'scotsman')
+    'scotsman'
+    >>> apply_edit(e[:3], 'man', 'scotsman')
+    'scoman'
+
+    The other form of edit operations, opcodes, is not very suitable for
+    such tricks, because it always has to span the complete strings;
+    subsets can be created by carefully replacing blocks with 'equal'
+    blocks, or by enlarging an 'equal' block at the expense of other
+    blocks and adjusting the other blocks accordingly.
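+
+    A complete opcode list spanning both strings applies cleanly, though;
+    a small sketch reusing the 'spam' -> 'park' opcodes shown earlier:
+
+    >>> apply_edit(opcodes('spam', 'park'), 'spam', 'park')
+    'park'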
+
+    >>> a, b = 'spam and eggs', 'foo and bar'
+    >>> e = opcodes(a, b)
+    >>> apply_edit(inverse(e), b, a)
+    'spam and eggs'
+    """
+    if len(edit_operations) == 0:
+        return source_string
+
+    len1 = len(source_string)
+    len2 = len(destination_string)
+
+    if len(edit_operations[0]) == 3:
+        return _Editops(edit_operations, len1, len2).apply(source_string, destination_string)
+
+    return _Opcodes(edit_operations, len1, len2).apply(source_string, destination_string)
+
+
+def subtract_edit(edit_operations, subsequence):
+    """
+    Subtract an edit subsequence from a sequence.
+
+    subtract_edit(edit_operations, subsequence)
+
+    The result is equivalent to
+    editops(apply_edit(subsequence, s1, s2), s2), except that it is
+    constructed directly from the edit operations. That is, if you apply
+    it to the result of applying the subsequence, you get the same final
+    string as from applying the complete edit_operations. It may not be
+    identical, though (in ambiguous cases, like insertion of a character
+    next to the same character).
+
+    The subtracted subsequence must be an ordered subset of
+    edit_operations.
+
+    Note this function does not accept difflib-style opcodes, as no one in
+    their right mind wants to create subsequences from them.
+
+    Examples
+    --------
+    >>> e = editops('man', 'scotsman')
+    >>> e1 = e[:3]
+    >>> bastard = apply_edit(e1, 'man', 'scotsman')
+    >>> bastard
+    'scoman'
+    >>> apply_edit(subtract_edit(e, e1), bastard, 'scotsman')
+    'scotsman'
+    """
+    str_len = 2**32
+    return (
+        _Editops(edit_operations, str_len, str_len)
+        .remove_subsequence(_Editops(subsequence, str_len, str_len))
+        .as_list()
+    )
+
+
+def inverse(edit_operations):
+    """
+    Invert the sense of an edit operation sequence.
+
+    In other words, it returns a list of edit operations transforming the
+    second (destination) string to the first (source). It can be used
+    with both editops and opcodes.
+
+    Parameters
+    ----------
+    edit_operations : list
+        edit operations to invert
+
+    Returns
+    -------
+    edit_operations : list
+        inverted edit operations
+
+    Examples
+    --------
+    >>> editops('spam', 'park')
+    [('delete', 0, 0), ('insert', 3, 2), ('replace', 3, 3)]
+    >>> inverse(editops('spam', 'park'))
+    [('insert', 0, 0), ('delete', 2, 3), ('replace', 3, 3)]
+    """
+    if len(edit_operations) == 0:
+        return []
+
+    if len(edit_operations[0]) == 3:
+        len1 = edit_operations[-1][1] + 1
+        len2 = edit_operations[-1][2] + 1
+        return _Editops(edit_operations, len1, len2).inverse().as_list()
+
+    len1 = edit_operations[-1][2]
+    len2 = edit_operations[-1][4]
+
+    return _Opcodes(edit_operations, len1, len2).inverse().as_list()
diff --git a/py311/lib/python3.11/site-packages/Levenshtein/__init__.pyi b/py311/lib/python3.11/site-packages/Levenshtein/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..8b51e2709c32bbc7867a111b0fe3b495094f3781
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/Levenshtein/__init__.pyi
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Hashable, Sequence
+from typing import overload
+
+__author__: str
+__license__: str
+__version__: str
+
+_EditopsList = list[tuple[str, int, int]]
+_OpcodesList = list[tuple[str, int, int, int, int]]
+_MatchingBlocks = list[tuple[int, int, int]]
+_AnyEditops = _EditopsList | _OpcodesList
+
+def inverse(edit_operations: list) -> list: ...
+@overload
+def editops(s1: Sequence[Hashable], s2: Sequence[Hashable]) -> _EditopsList: ...
+@overload +def editops( + ops: _AnyEditops, + s1: Sequence[Hashable] | int, + s2: Sequence[Hashable] | int, +) -> _EditopsList: ... +@overload +def opcodes(s1: Sequence[Hashable], s2: Sequence[Hashable]) -> _OpcodesList: ... +@overload +def opcodes( + ops: _AnyEditops, + s1: Sequence[Hashable] | int, + s2: Sequence[Hashable] | int, +) -> _OpcodesList: ... +def matching_blocks( + edit_operations: _AnyEditops, + source_string: Sequence[Hashable] | int, + destination_string: Sequence[Hashable] | int, +) -> _MatchingBlocks: ... +def subtract_edit(edit_operations: _EditopsList, subsequence: _EditopsList) -> _EditopsList: ... +def apply_edit(edit_operations: _AnyEditops, source_string: str, destination_string: str) -> str: ... +def median(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... +def quickmedian(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... +def median_improve( + string: str | bytes, + strlist: list[str | bytes], + wlist: list[float] | None = None, +) -> str: ... +def setmedian(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... +def setratio(strlist1: list[str | bytes], strlist2: list[str | bytes]) -> float: ... +def seqratio(strlist1: list[str | bytes], strlist2: list[str | bytes]) -> float: ... +def distance( + s1: Sequence[Hashable], + s2: Sequence[Hashable], + *, + weights: tuple[int, int, int] | None = (1, 1, 1), + processor: Callable[..., Sequence[Hashable]] | None = None, + score_cutoff: float | None = None, + score_hint: float | None = None, +) -> int: ... +def ratio( + s1: Sequence[Hashable], + s2: Sequence[Hashable], + *, + processor: Callable[..., Sequence[Hashable]] | None = None, + score_cutoff: float | None = None, +) -> float: ... +def hamming( + s1: Sequence[Hashable], + s2: Sequence[Hashable], + *, + pad: bool = True, + processor: Callable[..., Sequence[Hashable]] | None = None, + score_cutoff: float | None = None, +) -> int: ... +def jaro( + s1: Sequence[Hashable], + s2: Sequence[Hashable], + *, + processor: Callable[..., Sequence[Hashable]] | None = None, + score_cutoff: float | None = None, +) -> float: ... +def jaro_winkler( + s1: Sequence[Hashable], + s2: Sequence[Hashable], + *, + prefix_weight: float | None = 0.1, + processor: Callable[..., Sequence[Hashable]] | None = None, + score_cutoff: float | None = None, +) -> float: ... diff --git a/py311/lib/python3.11/site-packages/Levenshtein/py.typed b/py311/lib/python3.11/site-packages/Levenshtein/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/LICENSE b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/LICENSE @@ -0,0 +1,279 @@ +A. 
HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. + +All Python releases are Open Source (see https://opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +Python software and documentation are licensed under the +Python Software Foundation License Version 2. + +Starting with Python 3.8.6, examples, recipes, and other code in +the documentation are dual licensed under the PSF License Version 2 +and the Zero-Clause BSD license. + +Some software incorporated into Python is under different licenses. +The licenses are listed with code falling under that license. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c632040d66bf120a377fc3785940934361273a66 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/METADATA @@ -0,0 +1,123 @@ +Metadata-Version: 2.3 +Name: aiohappyeyeballs +Version: 2.6.1 +Summary: Happy Eyeballs for asyncio +License: PSF-2.0 +Author: J. Nick Koston +Author-email: nick@koston.org +Requires-Python: >=3.9 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Libraries +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: License :: OSI Approved :: Python Software Foundation License +Project-URL: Bug Tracker, https://github.com/aio-libs/aiohappyeyeballs/issues +Project-URL: Changelog, https://github.com/aio-libs/aiohappyeyeballs/blob/main/CHANGELOG.md +Project-URL: Documentation, https://aiohappyeyeballs.readthedocs.io +Project-URL: Repository, https://github.com/aio-libs/aiohappyeyeballs +Description-Content-Type: text/markdown + +# aiohappyeyeballs + +

+<!-- badges: CI Status · Documentation Status · Test coverage percentage ·
+Poetry · Ruff · pre-commit · PyPI Version · Supported Python versions ·
+License -->
+
+---
+
+**Documentation**: https://aiohappyeyeballs.readthedocs.io
+
+**Source Code**: https://github.com/aio-libs/aiohappyeyeballs
+
+---
+
+[Happy Eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs)
+([RFC 8305](https://www.rfc-editor.org/rfc/rfc8305.html))
+
+## Use case
+
+This library exists to allow connecting with
+[Happy Eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs)
+([RFC 8305](https://www.rfc-editor.org/rfc/rfc8305.html))
+when you already have a list of addrinfo and not a DNS name.
+
+The stdlib version of `loop.create_connection()`
+will only work when you pass in an unresolved name, which
+is not a good fit when using DNS caching or resolving
+names via another method such as `zeroconf`.
+
+## Installation
+
+Install this via pip (or your favourite package manager):
+
+`pip install aiohappyeyeballs`
+
+## License
+
+[aiohappyeyeballs is licensed under the same terms as cpython itself.](https://github.com/python/cpython/blob/main/LICENSE)
+
+## Example usage
+
+```python
+
+addr_infos = await loop.getaddrinfo("example.org", 80)
+
+socket = await start_connection(addr_infos)
+socket = await start_connection(addr_infos, local_addr_infos=local_addr_infos, happy_eyeballs_delay=0.2)
+
+transport, protocol = await loop.create_connection(
+    MyProtocol, sock=socket, ...)
+
+# Remove the first address for each family from addr_info
+pop_addr_infos_interleave(addr_info, 1)
+
+# Remove all matching addresses from addr_info
+remove_addr_infos(addr_info, "dead::beef::")
+
+# Convert a local_addr to local_addr_infos
+local_addr_infos = addr_to_addr_infos(("127.0.0.1",0))
+```
+
+A fuller, self-contained sketch appears at the end of this README.
+
+## Credits
+
+This package contains code from cpython and is licensed under the same terms as cpython itself.
+
+This package was created with
+[Copier](https://copier.readthedocs.io/) and the
+[browniebroke/pypackage-template](https://github.com/browniebroke/pypackage-template)
+project template.
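+
+## End-to-end sketch
+
+A minimal, self-contained variant of the snippets above; `asyncio.Protocol`
+stands in here for a real protocol factory, and `example.org` with a 0.2 s
+delay mirrors the illustrative values from the previous section:
+
+```python
+import asyncio
+import socket
+
+from aiohappyeyeballs import start_connection
+
+
+async def main() -> None:
+    loop = asyncio.get_running_loop()
+    # Resolve once (or reuse a cached / zeroconf result)...
+    addr_infos = await loop.getaddrinfo("example.org", 80, type=socket.SOCK_STREAM)
+    # ...then let Happy Eyeballs race the already-resolved addresses.
+    sock = await start_connection(addr_infos, happy_eyeballs_delay=0.2)
+    transport, _protocol = await loop.create_connection(asyncio.Protocol, sock=sock)
+    transport.close()
+
+
+asyncio.run(main())
+```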
+ diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f98b55aafa7c029ddda6f3fea78ced5742c20e51 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/RECORD @@ -0,0 +1,12 @@ +aiohappyeyeballs-2.6.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +aiohappyeyeballs-2.6.1.dist-info/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936 +aiohappyeyeballs-2.6.1.dist-info/METADATA,sha256=NSXlhJwAfi380eEjAo7BQ4P_TVal9xi0qkyZWibMsVM,5915 +aiohappyeyeballs-2.6.1.dist-info/RECORD,, +aiohappyeyeballs-2.6.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +aiohappyeyeballs-2.6.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88 +aiohappyeyeballs/__init__.py,sha256=x7kktHEtaD9quBcWDJPuLeKyjuVAI-Jj14S9B_5hcTs,361 +aiohappyeyeballs/_staggered.py,sha256=edfVowFx-P-ywJjIEF3MdPtEMVODujV6CeMYr65otac,6900 +aiohappyeyeballs/impl.py,sha256=Dlcm2mTJ28ucrGnxkb_fo9CZzLAkOOBizOt7dreBbXE,9681 +aiohappyeyeballs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +aiohappyeyeballs/types.py,sha256=YZJIAnyoV4Dz0WFtlaf_OyE4EW7Xus1z7aIfNI6tDDQ,425 +aiohappyeyeballs/utils.py,sha256=on9GxIR0LhEfZu8P6Twi9hepX9zDanuZM20MWsb3xlQ,3028 diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..0582547b15f02d3a51659106262832565d5dc5ea --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohappyeyeballs-2.6.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry-core 2.1.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/py311/lib/python3.11/site-packages/attr/__init__.py b/py311/lib/python3.11/site-packages/attr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5c6e0650bc4bf53806420d7ef5f881ecd2bd77ea --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/__init__.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: MIT + +""" +Classes Without Boilerplate +""" + +from functools import partial +from typing import Callable, Literal, Protocol + +from . 
import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Converter, + Factory, + _Nothing, + attrib, + attrs, + evolve, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +NothingType = Literal[_Nothing.NOTHING] + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "Converter", + "Factory", + "NothingType", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. + """ + + def __getattr__(name: str) -> str: + if name not in ("__version__", "__version_info__"): + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + from importlib.metadata import metadata + + meta = metadata("attrs") + + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + + return meta["version"] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/py311/lib/python3.11/site-packages/attr/__init__.pyi b/py311/lib/python3.11/site-packages/attr/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8d78fa19abe1a529af98889eb40017c32d5b62a2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/__init__.pyi @@ -0,0 +1,389 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Generic, + Literal, + Mapping, + Protocol, + Sequence, + TypeVar, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo +from attrs import ( + define as define, + field as field, + mutable as mutable, + frozen as frozen, + _EqOrderType, + _ValidatorType, + _ConverterType, + _ReprArgType, + _OnSetAttrType, + _OnSetAttrArgType, + _FieldTransformer, + _ValidatorArgType, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard, TypeAlias +else: + from typing_extensions import TypeGuard, TypeAlias + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_FilterType = Callable[["Attribute[_T]", _T], bool] + +# We subclass this here to keep the protocol's qualified name clean. 
+class AttrsInstance(AttrsInstance_, Protocol):
+    pass
+
+_A = TypeVar("_A", bound=type[AttrsInstance])
+
+class _Nothing(enum.Enum):
+    NOTHING = enum.auto()
+
+NOTHING = _Nothing.NOTHING
+NothingType: TypeAlias = Literal[_Nothing.NOTHING]
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+
+@overload
+def Factory(factory: Callable[[], _T]) -> _T: ...
+@overload
+def Factory(
+    factory: Callable[[Any], _T],
+    takes_self: Literal[True],
+) -> _T: ...
+@overload
+def Factory(
+    factory: Callable[[], _T],
+    takes_self: Literal[False],
+) -> _T: ...
+
+In = TypeVar("In")
+Out = TypeVar("Out")
+
+class Converter(Generic[In, Out]):
+    @overload
+    def __init__(self, converter: Callable[[In], Out]) -> None: ...
+    @overload
+    def __init__(
+        self,
+        converter: Callable[[In, AttrsInstance, Attribute], Out],
+        *,
+        takes_self: Literal[True],
+        takes_field: Literal[True],
+    ) -> None: ...
+    @overload
+    def __init__(
+        self,
+        converter: Callable[[In, Attribute], Out],
+        *,
+        takes_field: Literal[True],
+    ) -> None: ...
+    @overload
+    def __init__(
+        self,
+        converter: Callable[[In, AttrsInstance], Out],
+        *,
+        takes_self: Literal[True],
+    ) -> None: ...
+
+class Attribute(Generic[_T]):
+    name: str
+    default: _T | None
+    validator: _ValidatorType[_T] | None
+    repr: _ReprArgType
+    cmp: _EqOrderType
+    eq: _EqOrderType
+    order: _EqOrderType
+    hash: bool | None
+    init: bool
+    converter: Converter | None
+    metadata: dict[Any, Any]
+    type: type[_T] | None
+    kw_only: bool
+    on_setattr: _OnSetAttrType
+    alias: str | None
+
+    def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+#    - Pros: Handles simple cases correctly
+#    - Cons: Might produce less informative errors in the case of conflicting
+#      TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+#    - Pros: Better error messages than #1 for conflicting TypeVars
+#    - Cons: Terrible error messages for validator checks.
+#      e.g. attr.ib(type=int, validator=validate_str)
+#           -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+#    - Pros: Simple here, and we could customize the plugin with our own errors.
+#    - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+#     attr() -> Any
+#     attr(8) -> int
+#     attr(validator=<some callable>) -> Whatever the callable expects.
+# This makes this type of assignment possible:
+#     x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
+@overload
+def attrib(
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    cmp: _EqOrderType | None = ...,
+    hash: bool | None = ...,
+    init: bool = ...,
+    metadata: Mapping[Any, Any] | None = ...,
+    type: None = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool | None = ...,
+    eq: _EqOrderType | None = ...,
+    order: _EqOrderType | None = ...,
+    on_setattr: _OnSetAttrArgType | None = ...,
+    alias: str | None = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload +def attrib( + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: object = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> _C: ... 
+@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: type[AttrsInstance]) -> Any: ... +def fields_dict(cls: type[AttrsInstance]) -> dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: dict[str, Any] | None = ..., + localns: dict[str, Any] | None = ..., + attribs: list[Attribute[Any]] | None = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: list[str] | tuple[str, ...] | dict[str, Any], + bases: tuple[type, ...] = ..., + class_body: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + collect_by_mro: bool = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + dict_factory: type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Callable[[type, Attribute[Any], Any], Any] | None = ..., + tuple_keys: bool | None = ..., +) -> dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + tuple_factory: type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... 
+ +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/__init__.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57630b80a5266640a7741a0d708300cd0c7fb528 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/__init__.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_cmp.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_cmp.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af74683ca3e688fe17e0b556bdd5a0368809a5da Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_cmp.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_compat.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_compat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64e72b37435eeaaa12aa53c8d531ebac528de4d9 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_compat.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_config.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4286cbe54864b616f1c96e7361e49b16bf905d72 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_config.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_funcs.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_funcs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558abef9004625e971222018b033fbd5ec43a285 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_funcs.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6268119d3ee832e953e8e53c43a790320f24bc0 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/_version_info.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/_version_info.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7837624c8755cd27857ed47786e7715c12a4c107 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/_version_info.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/converters.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/converters.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eccd88d1a3a0f1de2ba09515fe96ad494b6e0420 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/converters.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/exceptions.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/exceptions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d8eb7e4359bdd3df0ee90ca329251009be13e0d 
Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/exceptions.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/filters.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/filters.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..774d47777d53ce5823fe21802e37ae2caad5b860 Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/filters.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/setters.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/setters.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e59cf67598bd57cfd8f3cb5320272c81d467cbd Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/setters.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/__pycache__/validators.cpython-311.pyc b/py311/lib/python3.11/site-packages/attr/__pycache__/validators.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98270e24ce65dd6801487457d5e25e3adb4d2e5d Binary files /dev/null and b/py311/lib/python3.11/site-packages/attr/__pycache__/validators.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/attr/_cmp.py b/py311/lib/python3.11/site-packages/attr/_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..09bab491f83ef4d15129f34b5f5a9e69bb34d63c --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_cmp.py @@ -0,0 +1,160 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import __ne__ + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, + and ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if at least + one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + Args: + eq (typing.Callable | None): + Callable used to evaluate equality of two objects. + + lt (typing.Callable | None): + Callable used to evaluate whether one object is less than another + object. + + le (typing.Callable | None): + Callable used to evaluate whether one object is less than or equal + to another object. + + gt (typing.Callable | None): + Callable used to evaluate whether one object is greater than + another object. + + ge (typing.Callable | None): + Callable used to evaluate whether one object is greater than or + equal to another object. + + require_same_type (bool): + When `True`, equality and ordering methods will return + `NotImplemented` if objects are not of the same type. + + class_name (str | None): Name of class. Defaults to "Comparable". + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. 
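+    # (For orientation: each callable provided below becomes one generated
+    # dunder, e.g. body["__lt__"] = _make_operator("lt", lt); the class is
+    # then built via types.new_class, and functools.total_ordering fills in
+    # any missing order methods.)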
+ num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = __ne__ + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = types.new_class( + class_name, (object,), {}, lambda ns: ns.update(body) + ) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. + if 0 < num_order_functions < 4: + if not has_eq_function: + # functools.total_ordering requires __eq__ to be defined, + # so raise an early error here to keep a nice stack trace. + msg = "eq must be defined in order to complete ordering from lt, le, gt, ge." + raise ValueError(msg) + type_ = functools.total_ordering(type_) + + return type_ + + +def _make_init(): + """ + Create __init__ method. + """ + + def __init__(self, value): + """ + Initialize object with *value*. + """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. + """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = f"__{name}__" + method.__doc__ = ( + f"Return a {_operation_names[name]} b. Computed by attrs." + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + return all(func(self, other) for func in self._requirements) + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/py311/lib/python3.11/site-packages/attr/_cmp.pyi b/py311/lib/python3.11/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cc7893b04520afa719b1412c7646c3c1b39bf94b --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Any, Callable + +_CompareWithType = Callable[[Any, Any], bool] + +def cmp_using( + eq: _CompareWithType | None = ..., + lt: _CompareWithType | None = ..., + le: _CompareWithType | None = ..., + gt: _CompareWithType | None = ..., + ge: _CompareWithType | None = ..., + require_same_type: bool = ..., + class_name: str = ..., +) -> type: ...
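+# (Illustrative sketch, not part of the upstream stub; the class and field
+# names are hypothetical.) A minimal use of `cmp_using`:
+#
+#   import attr
+#
+#   @attr.s
+#   class Measurement:
+#       # Compare the float payload with a tolerance instead of plain `==`.
+#       value = attr.ib(eq=attr.cmp_using(eq=lambda a, b: abs(a - b) < 1e-9))
+#
+#   Measurement(0.1 + 0.2) == Measurement(0.3)  # True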
diff --git a/py311/lib/python3.11/site-packages/attr/_compat.py b/py311/lib/python3.11/site-packages/attr/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..bc68ed9eaf9853bcc6065e7691209a787bf2eccc --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_compat.py @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: MIT + +import inspect +import platform +import sys +import threading + +from collections.abc import Mapping, Sequence # noqa: F401 +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_10_PLUS = sys.version_info[:2] >= (3, 10) +PY_3_11_PLUS = sys.version_info[:2] >= (3, 11) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) +PY_3_13_PLUS = sys.version_info[:2] >= (3, 13) +PY_3_14_PLUS = sys.version_info[:2] >= (3, 14) + + +if PY_3_14_PLUS: + import annotationlib + + # We request forward-ref annotations to not break in the presence of + # forward references. + + def _get_annotations(cls): + return annotationlib.get_annotations( + cls, format=annotationlib.Format.FORWARDREF + ) + +else: + + def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + return cls.__dict__.get("__annotations__", {}) + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/py311/lib/python3.11/site-packages/attr/_config.py b/py311/lib/python3.11/site-packages/attr/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..4b257726fb1e8b95583ecc3eee8d153336dc4089 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["get_run_validators", "set_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. 
deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to the new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." + raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to the new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/py311/lib/python3.11/site-packages/attr/_funcs.py b/py311/lib/python3.11/site-packages/attr/_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..1adb50021373d9c09fcb9db0641bbc03248d54a3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_funcs.py @@ -0,0 +1,497 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import get_generic_base +from ._make import _OBJ_SETATTR, NOTHING, fields +from .exceptions import AttrsAttributeNotFoundError + + +_ATOMIC_TYPES = frozenset( + { + type(None), + bool, + int, + float, + str, + complex, + bytes, + type(...), + type, + range, + property, + } +) + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return value determines whether an attribute or + element is included (`True`) or dropped (`False`). It is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + dict_factory (~typing.Callable): + A callable to produce dictionaries from. For example, to produce + ordered dictionaries instead of normal Python dictionaries, pass in + ``collections.OrderedDict``. + + retain_collection_types (bool): + Do not convert to `list` when encountering an attribute whose type + is `tuple` or `set`. Only meaningful if *recurse* is `True`. + + value_serializer (typing.Callable | None): + A hook that is called for every attribute or dict key/value. It + receives the current instance, field, and value and must return the + (updated) value. The hook is run *after* the optional *filter* has + been applied. + + Returns: + Return type of *dict_factory*. + + Raises: + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 + If a dict has a collection for a key, it is serialized as a tuple.
+ """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + value_type = type(v) + if value_type in _ATOMIC_TYPES: + rv[a.name] = v + elif has(value_type): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif issubclass(value_type, (tuple, list, set, frozenset)): + cf = value_type if retain_collection_types is True else list + items = [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + try: + rv[a.name] = cf(items) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which happens for a namedtuple) + rv[a.name] = cf(*items) + elif issubclass(value_type, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances; this works on anything. + """ + val_type = type(val) + if val_type in _ATOMIC_TYPES: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + elif getattr(val_type, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif issubclass(val_type, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif issubclass(val_type, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the *attrs* attribute values of *inst* as a tuple. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): + Recurse into classes that are also *attrs*-decorated.
+ + filter (~typing.Callable): + A callable whose return value determines whether an attribute or + element is included (`True`) or dropped (`False`). It is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + tuple_factory (~typing.Callable): + A callable to produce tuples from. Pass ``list``, for example, to + produce lists instead of tuples. + + retain_collection_types (bool): + Do not convert to `list` or `dict` when encountering an attribute + whose type is `tuple`, `dict`, or `set`. Only meaningful if + *recurse* is `True`. + + Returns: + Return type of *tuple_factory* + + Raises: + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + value_type = type(v) + if recurse is True: + if value_type in _ATOMIC_TYPES: + rv.append(v) + elif has(value_type): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif issubclass(value_type, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + items = [ + ( + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + ) + for j in v + ] + try: + rv.append(cf(items)) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which happens for a namedtuple) + rv.append(cf(*items)) + elif issubclass(value_type, dict): + df = value_type if retain is True else dict + rv.append( + df( + ( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk + ), + ( + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv + ), + ) + for kk, vv in v.items() + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with *attrs* attributes. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + Returns: + bool: + """ + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is not None: + return True + + # No attrs, maybe it's a specialized generic (A[str])? + generic_base = get_generic_base(cls) + if generic_base is not None: + generic_attrs = getattr(generic_base, "__attrs_attrs__", None) + if generic_attrs is not None: + # Stick it on here for speed next time. + cls.__attrs_attrs__ = generic_attrs + return generic_attrs is not None + return False + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + This is different from `evolve`, which applies the changes to the arguments + that create the new instance. + + `evolve`'s behavior is preferable, but there are `edge cases`_ where it + doesn't work. Therefore `assoc` is deprecated, but will not be removed. + + .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251 + + Args: + inst: Instance of a class with *attrs* attributes. + + changes: Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated. + + Raises: + attrs.exceptions.AttrsAttributeNotFoundError: + If *attr_name* couldn't be found on *cls*.
+ + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. deprecated:: 17.1.0 + Use `attrs.evolve` instead if you can. This function will not be + removed due to the slightly different approach compared to + `attrs.evolve`, though. + """ + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in changes.items(): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + msg = f"{k} is not an attrs attribute on {new.__class__}." + raise AttrsAttributeNotFoundError(msg) + _OBJ_SETATTR(new, k, v) + return new + + +def resolve_types( + cls, globalns=None, localns=None, attribs=None, include_extras=True +): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in :class:`Attribute`'s + *type* field. In other words, you don't need to resolve your types if you + only use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, for example, if the name only + exists inside a method, you may pass *globalns* or *localns* to specify + other dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + Args: + cls (type): Class to resolve. + + globalns (dict | None): Dictionary containing global variables. + + localns (dict | None): Dictionary containing local variables. + + attribs (list | None): + List of attribs for the given class. This is necessary when calling + from inside a ``field_transformer`` since *cls* is not an *attrs* + class yet. + + include_extras (bool): + Resolve more accurately, if possible. Pass ``include_extras`` to + ``typing.get_type_hints``, if supported by the typing module. On + supported Python versions (3.9+), this resolves the types more + accurately. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class and you didn't pass any attribs. + + NameError: If types cannot be resolved because of missing variables. + + Returns: + *cls* so you can use this function also as a class decorator. Please + note that you have to apply it **after** `attrs.define`. That means the + decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + .. versionadded:: 23.1.0 *include_extras* + """ + # Since calling get_type_hints is expensive, we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + kwargs = { + "globalns": globalns, + "localns": localns, + "include_extras": include_extras, + } + + hints = typing.get_type_hints(cls, **kwargs) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _OBJ_SETATTR(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too.
+ return cls diff --git a/py311/lib/python3.11/site-packages/attr/_make.py b/py311/lib/python3.11/site-packages/attr/_make.py new file mode 100644 index 0000000000000000000000000000000000000000..d24d9ba98575c3c4beb3186ca0335d2175663c7f --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_make.py @@ -0,0 +1,3362 @@ +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +import abc +import contextlib +import copy +import enum +import inspect +import itertools +import linecache +import sys +import types +import unicodedata +import weakref + +from collections.abc import Callable, Mapping +from functools import cached_property +from typing import Any, NamedTuple, TypeVar + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + PY_3_10_PLUS, + PY_3_11_PLUS, + PY_3_13_PLUS, + _AnnotationExtractor, + _get_annotations, + get_generic_base, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. +_OBJ_SETATTR = object.__setattr__ +_INIT_FACTORY_PAT = "__attr_factory_%s" +_CLASSVAR_PREFIXES = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_HASH_CACHE_FIELD = "_attrs_cached_hash" + +_EMPTY_METADATA_SINGLETON = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_SENTINEL = object() + +_DEFAULT_ON_SETATTR = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when `None` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when `None` is ambiguous. + +When using in 3rd party code, use `attrs.NothingType` for type annotations. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since `None` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=None, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new field / attribute on a class. + + Identical to `attrs.field`, except it's not keyword-only. + + Consider using `attrs.field` in new code (``attr.ib`` will *never* go away, + though). + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attr.s` (or similar)! + + + .. 
versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is `None` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 + *converter* as a replacement for the deprecated *convert* to achieve + consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 22.2.0 *alias* + .. versionchanged:: 25.4.0 + *kw_only* can now be None, and its default is also changed from False to + None. + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if factory is not None: + if default is not NOTHING: + msg = ( + "The `default` and `factory` arguments are mutually exclusive." + ) + raise ValueError(msg) + if not callable(factory): + msg = "The `factory` argument must be a callable." + raise ValueError(msg) + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + alias=alias, + ) + + +def _compile_and_eval( + script: str, + globs: dict[str, Any] | None, + locs: Mapping[str, object] | None = None, + filename: str = "", +) -> None: + """ + Evaluate the script with the given global (globs) and local (locs) + variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _linecache_and_compile( + script: str, + filename: str, + globs: dict[str, Any] | None, + locals: Mapping[str, object] | None = None, +) -> dict[str, Any]: + """ + Cache the script with _linecache_, compile it, and return the _locals_. + """ + + locs = {} if locals is None else locals + + # In order for debuggers like PDB to be able to step through the code, + # we add a fake linecache entry.
+ count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + + filename = f"{base_filename[:-1]}-{count}>" + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs + + +def _make_attr_tuple_class(cls_name: str, attr_names: list[str]) -> type: + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = f"{cls_name}Attributes" + body = {} + for i, attr_name in enumerate(attr_names): + + def getter(self, i=i): + return self[i] + + body[attr_name] = property(getter) + return type(attr_class_name, (tuple,), body) + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +class _Attributes(NamedTuple): + attrs: type + base_attrs: list[Attribute] + base_attrs_map: dict[str, type] + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_CLASSVAR_PREFIXES) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + return attrib_name in cls.__dict__ + + +def _collect_base_attrs( + cls, taken_attr_names +) -> tuple[list[Attribute], dict[str, type]]: + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. 
+ for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, +) -> _Attributes: + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + If *collect_by_mro* is True, collect them in the correct MRO order, + otherwise use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = list(these.items()) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if a.__class__ is not _CountingAttr: + a = attrib(a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if unannotated: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + ), + key=lambda e: e[1].counter, + ) + + fca = Attribute.from_counting_attr + no = ClassProps.KeywordOnly.NO + own_attrs = [ + fca( + attr_name, + ca, + kw_only is not no, + anns.get(attr_name), + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only is ClassProps.KeywordOnly.FORCE: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + if field_transformer is not None: + attrs = tuple(field_transformer(cls, attrs)) + + # Check attr order after executing the field_transformer. + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + for a in attrs: + if not a.alias: + # Evolve is very slow, so we hold our nose and do it dirty. + _OBJ_SETATTR.__get__(a)("alias", _default_init_alias_for(a.name)) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! 
+ attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes(AttrsClass(attrs), base_attrs, base_attr_map) + + +def _make_cached_property_getattr(cached_properties, original_getattr, cls): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). + "def wrapper(_cls):", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " try:", + " return super().__getattribute__(item)", + " except AttributeError:", + " if not hasattr(super(), '__getattr__'):", + " raise", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper(_cls)", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _OBJ_SETATTR.__get__, + "original_getattr": original_getattr, + } + + return _linecache_and_compile( + "\n".join(lines), unique_filename, glob, locals={"_cls": cls} + )["__getattr__"] + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + "__suppress_context__", + "__notes__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + if isinstance(self, BaseException) and name in ("__notes__",): + BaseException.__delattr__(self, name) + return + + raise FrozenInstanceError + + +def evolve(*args, **changes): + """ + Create a new instance, based on the first positional argument with + *changes* applied. + + .. tip:: + + On Python 3.13 and later, you can also use `copy.replace` instead. + + Args: + + inst: + Instance of a class with *attrs* attributes. *inst* must be passed + as a positional argument. + + changes: + Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated. + + Raises: + TypeError: + If *attr_name* couldn't be found in the class ``__init__``. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. + .. versionchanged:: 24.1.0 + *inst* can't be passed as a keyword argument anymore. + """ + try: + (inst,) = args + except ValueError: + msg = ( + f"evolve() takes 1 positional argument, but {len(args)} were given" + ) + raise TypeError(msg) from None + + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. 
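+        # (For orientation: fields are filled from *changes* under their
+        # __init__ alias, so a private attribute ``_x`` is passed as ``x``.)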
+ init_name = a.alias + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_add_method_dunders", + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_custom_setattr", + "_has_post_init", + "_has_pre_init", + "_is_exc", + "_on_setattr", + "_pre_init_has_args", + "_repr_added", + "_script_snippets", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + ) + + def __init__( + self, + cls: type, + these, + auto_attribs: bool, + props: ClassProps, + has_custom_setattr: bool, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + props.kw_only, + props.collected_fields_by_mro, + props.field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if props.is_slotted else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = props.is_slotted + self._frozen = props.is_frozen + self._weakref_slot = props.has_weakref_slot + self._cache_hash = ( + props.hashability is ClassProps.Hashability.HASHABLE_CACHED + ) + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = props.is_exception + self._on_setattr = props.on_setattr_hook + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + self._cls_dict["__attrs_props__"] = props + + if props.is_frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif self._on_setattr in ( + _DEFAULT_ON_SETATTR, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + self._on_setattr == _DEFAULT_ON_SETATTR + and not (has_validator or has_converter) + ) + or (self._on_setattr == setters.validate and not has_validator) + or (self._on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if props.added_pickling: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + # tuples of script, globs, hook + self._script_snippets: list[ + tuple[str, dict, Callable[[dict, dict], Any]] + ] = [] + self._repr_added = False + + # We want to only do this check once; in 99.9% of cases these + # exist. 
+ if not hasattr(self._cls, "__module__") or not hasattr( + self._cls, "__qualname__" + ): + self._add_method_dunders = self._add_method_dunders_safe + else: + self._add_method_dunders = self._add_method_dunders_unsafe + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + def _eval_snippets(self) -> None: + """ + Evaluate any registered snippets in one go. + """ + script = "\n".join([snippet[0] for snippet in self._script_snippets]) + globs = {} + for _, snippet_globs, _ in self._script_snippets: + globs.update(snippet_globs) + + locs = _linecache_and_compile( + script, + _generate_unique_filename(self._cls, "methods"), + globs, + ) + + for _, _, hook in self._script_snippets: + hook(self._cls_dict, locs) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + self._eval_snippets() + if self._slots is True: + cls = self._create_slots_class() + self._cls.__attrs_base_of_slotted__ = weakref.ref(cls) + else: + cls = self._patch_original_class() + if PY_3_10_PLUS: + cls = abc.update_abstractmethods(cls) + + # The method gets only called if it's not inherited from a base class. + # _has_own_attribute does NOT work properly for classmethods. + if ( + getattr(cls, "__attrs_init_subclass__", None) + and "__attrs_init_subclass__" not in cls.__dict__ + ): + cls.__attrs_init_subclass__() + + return cls + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _SENTINEL) is not _SENTINEL + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _OBJ_SETATTR + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # 3.14.0rc2+ + if hasattr(sys, "_clear_type_descriptors"): + sys._clear_type_descriptors(self._cls) + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. 
+ if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _OBJ_SETATTR + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = {} + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + cached_properties = { + name: cached_prop.func + for name, cached_prop in cd.items() + if isinstance(cached_prop, cached_property) + } + + # Collect methods with a `__class__` reference that are shadowed in the new class. + # To know to update them. + additional_closure_functions_to_update = [] + if cached_properties: + class_annotations = _get_annotations(self._cls) + for name, func in cached_properties.items(): + # Add cached properties to names for slotting. + names += (name,) + # Clear out function from class to avoid clashing. + del cd[name] + additional_closure_functions_to_update.append(func) + annotation = inspect.signature(func).return_annotation + if annotation is not inspect.Parameter.empty: + class_annotations[name] = annotation + + original_getattr = cd.get("__getattr__") + if original_getattr is not None: + additional_closure_functions_to_update.append(original_getattr) + + cd["__getattr__"] = _make_cached_property_getattr( + cached_properties, original_getattr, self._cls + ) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_HASH_CACHE_FIELD) + + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . + # If a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in itertools.chain( + cls.__dict__.values(), additional_closure_functions_to_update + ): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). 
+ # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + script, globs = _make_repr_script(self._attrs, ns) + + def _attach_repr(cls_dict, globs): + cls_dict["__repr__"] = self._add_method_dunders(globs["__repr__"]) + + self._script_snippets.append((script, globs, _attach_repr)) + self._repr_added = True + return self + + def add_str(self): + if not self._repr_added: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _OBJ_SETATTR.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_HASH_CACHE_FIELD, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + script, globs = _make_hash_script( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + + def attach_hash(cls_dict: dict, locs: dict) -> None: + cls_dict["__hash__"] = self._add_method_dunders(locs["__hash__"]) + + self._script_snippets.append((script, globs, attach_hash)) + + return self + + def add_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + + def _attach_init(cls_dict, globs): + init = globs["__init__"] + init.__annotations__ = annotations + cls_dict["__init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_init)) + + return self + + def add_replace(self): + self._cls_dict["__replace__"] = self._add_method_dunders( + lambda self, **changes: evolve(self, **changes) + ) + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + + def _attach_attrs_init(cls_dict, globs): + init = globs["__attrs_init__"] + init.__annotations__ = annotations + cls_dict["__attrs_init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_attrs_init)) + + return self + + def add_eq(self): + cd = self._cls_dict + + script, globs = _make_eq_script(self._attrs) + + def _attach_eq(cls_dict, globs): + cls_dict["__eq__"] = self._add_method_dunders(globs["__eq__"]) + + self._script_snippets.append((script, globs, _attach_eq)) + + cd["__ne__"] = __ne__ + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." + raise ValueError(msg) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _OBJ_SETATTR(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders_unsafe(self, method: Callable) -> Callable: + """ + Add __module__ and __qualname__ to a *method*. 
+        """
+        method.__module__ = self._cls.__module__
+
+        method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}"
+
+        method.__doc__ = (
+            f"Method generated by attrs for class {self._cls.__qualname__}."
+        )
+
+        return method
+
+    def _add_method_dunders_safe(self, method: Callable) -> Callable:
+        """
+        Add __module__ and __qualname__ to a *method* if possible.
+        """
+        with contextlib.suppress(AttributeError):
+            method.__module__ = self._cls.__module__
+
+        with contextlib.suppress(AttributeError):
+            method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}"
+
+        with contextlib.suppress(AttributeError):
+            method.__doc__ = f"Method generated by attrs for class {self._cls.__qualname__}."
+
+        return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        msg = "Don't mix `cmp` with `eq` and `order`."
+        raise ValueError(msg)
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        return cmp, cmp
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq = default_eq
+
+    if order is None:
+        order = eq
+
+    if eq is False and order is True:
+        msg = "`order` can only be True if `eq` is True too."
+        raise ValueError(msg)
+
+    return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        msg = "Don't mix `cmp` with `eq` and `order`."
+        raise ValueError(msg)
+
+    def decide_callable_or_boolean(value):
+        """
+        Decide whether a key function is used.
+        """
+        if callable(value):
+            value, key = True, value
+        else:
+            key = None
+        return value, key
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        cmp, cmp_key = decide_callable_or_boolean(cmp)
+        return cmp, cmp_key, cmp, cmp_key
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq, eq_key = default_eq, None
+    else:
+        eq, eq_key = decide_callable_or_boolean(eq)
+
+    if order is None:
+        order, order_key = eq, eq_key
+    else:
+        order, order_key = decide_callable_or_boolean(order)
+
+    if eq is False and order is True:
+        msg = "`order` can only be True if `eq` is True too."
+        raise ValueError(msg)
+
+    return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+    cls, flag, auto_detect, dunders, default=True
+):
+    """
+    Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
+    the same as passed into @attr.s, and *dunders* is a tuple of attribute
+    names whose presence signals that the user has implemented the method
+    themselves.
+
+    Return *default* if no reason either for or against is found.
+    """
+    if flag is True or flag is False:
+        return flag
+
+    if flag is None and auto_detect is False:
+        return default
+
+    # Logically, flag is None and auto_detect is True here.
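+    # A dunder only counts as user-provided if it is defined on *cls*
+    # itself, not merely inherited from a base class.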
+    for dunder in dunders:
+        if _has_own_attribute(cls, dunder):
+            return False
+
+    return default
+
+
+def attrs(
+    maybe_cls=None,
+    these=None,
+    repr_ns=None,
+    repr=None,
+    cmp=None,
+    hash=None,
+    init=None,
+    slots=False,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=False,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=False,
+    eq=None,
+    order=None,
+    auto_detect=False,
+    collect_by_mro=False,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+    unsafe_hash=None,
+    force_kw_only=True,
+):
+    r"""
+    A class decorator that adds :term:`dunder methods` according to the
+    specified attributes using `attr.ib` or the *these* argument.
+
+    Consider using `attrs.define` / `attrs.frozen` in new code (``attr.s`` will
+    *never* go away, though).
+
+    Args:
+        repr_ns (str):
+            When using nested classes, there was no way in Python 2 to
+            automatically detect that. This argument allows setting a custom
+            name for a more meaningful ``repr`` output. This argument is
+            pointless in Python 3 and is therefore deprecated.
+
+    .. caution::
+        Refer to `attrs.define` for the rest of the parameters, but note that
+        they can have different defaults.
+
+        Notably, leaving *on_setattr* as `None` will **not** add any hooks.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+       *hash* supports `None` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+       If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+       `DeprecationWarning` if the classes compared are subclasses of each
+       other. ``__eq__`` and ``__ne__`` never tried to compare subclasses to
+       each other.
+    .. versionchanged:: 19.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+       subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *auto_detect*
+    .. versionadded:: 20.1.0 *collect_by_mro*
+    .. versionadded:: 20.1.0 *getstate_setstate*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionadded:: 20.3.0 *field_transformer*
+    .. versionchanged:: 21.1.0
+       ``init=False`` injects ``__attrs_init__``
+    .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
+    .. versionadded:: 21.3.0 *match_args*
+    .. versionadded:: 22.2.0
+       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+    .. deprecated:: 24.1.0 *repr_ns*
+    .. versionchanged:: 24.1.0
+       Instances are not compared as tuples of attributes anymore, but using a
+       big ``and`` condition. This is faster and has more correct behavior for
+       uncomparable values like `math.nan`.
+    .. versionadded:: 24.1.0
+       If a class has an *inherited* classmethod called
+       ``__attrs_init_subclass__``, it is executed after the class is created.
+    .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*.
+    .. versionchanged:: 25.4.0
+       *kw_only* now only applies to attributes defined in the current class,
+       and respects attribute-level ``kw_only=False`` settings.
+    .. versionadded:: 25.4.0 *force_kw_only*
+    """
+    if repr_ns is not None:
+        import warnings
+
+        warnings.warn(
+            DeprecationWarning(
+                "The `repr_ns` argument is deprecated and will be removed in or after August 2025."
+            ),
+            stacklevel=2,
+        )
+
+    eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+
+    # unsafe_hash takes precedence due to PEP 681.
+    if unsafe_hash is not None:
+        hash = unsafe_hash
+
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
+    def wrap(cls):
+        nonlocal hash
+        is_frozen = frozen or _has_frozen_base_class(cls)
+        is_exc = auto_exc is True and issubclass(cls, BaseException)
+        has_own_setattr = auto_detect and _has_own_attribute(
+            cls, "__setattr__"
+        )
+
+        if has_own_setattr and is_frozen:
+            msg = "Can't freeze a class with a custom __setattr__."
+            raise ValueError(msg)
+
+        eq = not is_exc and _determine_whether_to_implement(
+            cls, eq_, auto_detect, ("__eq__", "__ne__")
+        )
+
+        Hashability = ClassProps.Hashability
+
+        if is_exc:
+            hashability = Hashability.LEAVE_ALONE
+        elif hash is True:
+            hashability = (
+                Hashability.HASHABLE_CACHED
+                if cache_hash
+                else Hashability.HASHABLE
+            )
+        elif hash is False:
+            hashability = Hashability.LEAVE_ALONE
+        elif hash is None:
+            if auto_detect is True and _has_own_attribute(cls, "__hash__"):
+                hashability = Hashability.LEAVE_ALONE
+            elif eq is True and is_frozen is True:
+                hashability = (
+                    Hashability.HASHABLE_CACHED
+                    if cache_hash
+                    else Hashability.HASHABLE
+                )
+            elif eq is False:
+                hashability = Hashability.LEAVE_ALONE
+            else:
+                hashability = Hashability.UNHASHABLE
+        else:
+            msg = "Invalid value for hash. Must be True, False, or None."
+            raise TypeError(msg)
+
+        KeywordOnly = ClassProps.KeywordOnly
+        if kw_only:
+            kwo = KeywordOnly.FORCE if force_kw_only else KeywordOnly.YES
+        else:
+            kwo = KeywordOnly.NO
+
+        props = ClassProps(
+            is_exception=is_exc,
+            is_frozen=is_frozen,
+            is_slotted=slots,
+            collected_fields_by_mro=collect_by_mro,
+            added_init=_determine_whether_to_implement(
+                cls, init, auto_detect, ("__init__",)
+            ),
+            added_repr=_determine_whether_to_implement(
+                cls, repr, auto_detect, ("__repr__",)
+            ),
+            added_eq=eq,
+            added_ordering=not is_exc
+            and _determine_whether_to_implement(
+                cls,
+                order_,
+                auto_detect,
+                ("__lt__", "__le__", "__gt__", "__ge__"),
+            ),
+            hashability=hashability,
+            added_match_args=match_args,
+            kw_only=kwo,
+            has_weakref_slot=weakref_slot,
+            added_str=str,
+            added_pickling=_determine_whether_to_implement(
+                cls,
+                getstate_setstate,
+                auto_detect,
+                ("__getstate__", "__setstate__"),
+                default=slots,
+            ),
+            on_setattr_hook=on_setattr,
+            field_transformer=field_transformer,
+        )
+
+        if not props.is_hashable and cache_hash:
+            msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled."
+            raise TypeError(msg)
+
+        builder = _ClassBuilder(
+            cls,
+            these,
+            auto_attribs=auto_attribs,
+            props=props,
+            has_custom_setattr=has_own_setattr,
+        )
+
+        if props.added_repr:
+            builder.add_repr(repr_ns)
+
+        if props.added_str:
+            builder.add_str()
+
+        if props.added_eq:
+            builder.add_eq()
+        if props.added_ordering:
+            builder.add_order()
+
+        if not frozen:
+            builder.add_setattr()
+
+        if props.is_hashable:
+            builder.add_hash()
+        elif props.hashability is Hashability.UNHASHABLE:
+            builder.make_unhashable()
+
+        if props.added_init:
+            builder.add_init()
+        else:
+            builder.add_attrs_init()
+            if cache_hash:
+                msg = "Invalid value for cache_hash. To use hash caching, init must be True."
+                raise TypeError(msg)
+
+        if PY_3_13_PLUS and not _has_own_attribute(cls, "__replace__"):
+            builder.add_replace()
+
+        if (
+            PY_3_10_PLUS
+            and match_args
+            and not _has_own_attribute(cls, "__match_args__")
+        ):
+            builder.add_match_args()
+
+        return builder.build_class()
+
+    # maybe_cls's type depends on the usage of the decorator. It's a class
+    # if it's used as `@attrs` but `None` if used as `@attrs()`.
+    if maybe_cls is None:
+        return wrap
+
+    return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+    """
+    Check whether *cls* has a frozen ancestor by looking at its
+    __setattr__.
+    """
+    return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls: type, func_name: str) -> str:
+    """
+    Create a "filename" suitable for a function being generated.
+    """
+    return (
+        f"<attrs generated {func_name} {cls.__module__}."
+        f"{getattr(cls, '__qualname__', cls.__name__)}>"
+    )
+
+
+def _make_hash_script(
+    cls: type, attrs: list[Attribute], frozen: bool, cache_hash: bool
+) -> tuple[str, dict]:
+    attrs = tuple(
+        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+    )
+
+    tab = "        "
+
+    type_hash = hash(_generate_unique_filename(cls, "hash"))
+    # If eq is custom generated, we need to include the functions in globs
+    globs = {}
+
+    hash_def = "def __hash__(self"
+    hash_func = "hash(("
+    closing_braces = "))"
+    if not cache_hash:
+        hash_def += "):"
+    else:
+        hash_def += ", *"
+
+        hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):"
+        hash_func = "_cache_wrapper(" + hash_func
+        closing_braces += ")"
+
+    method_lines = [hash_def]
+
+    def append_hash_computation_lines(prefix, indent):
+        """
+        Generate the code for actually computing the hash code.
+ Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_HASH_CACHE_FIELD} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_HASH_CACHE_FIELD}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_HASH_CACHE_FIELD} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_HASH_CACHE_FIELD}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return script, globs + + +def _add_hash(cls: type, attrs: list[Attribute]): + """ + Add a hash method to *cls*. + """ + script, globs = _make_hash_script( + cls, attrs, frozen=False, cache_hash=False + ) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__hash__") + ) + cls.__hash__ = globs["__hash__"] + return cls + + +def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + +def _make_eq_script(attrs: list) -> tuple[str, dict]: + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + globs = {} + if attrs: + lines.append(" return (") + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + f" {cmp_name}(self.{a.name}) == {cmp_name}(other.{a.name})" + ) + else: + lines.append(f" self.{a.name} == other.{a.name}") + if a is not attrs[-1]: + lines[-1] = f"{lines[-1]} and" + lines.append(" )") + else: + lines.append(" return True") + + script = "\n".join(lines) + + return script, globs + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. 
+ """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_eq_script(attrs) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__eq__") + ) + cls.__eq__ = globs["__eq__"] + cls.__ne__ = __ne__ + + return cls + + +def _make_repr_script(attrs, ns) -> tuple[str, dict]: + """ + Create the source and globs for a __repr__ and return it. + """ + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return "\n".join(lines), globs + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_repr_script(attrs, ns) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__repr__") + ) + cls.__repr__ = globs["__repr__"] + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. 
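+                # Later fields() calls on the same class then return through
+                # the plain getattr() fast path above.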
+ cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose keys + are the attribute names. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + dict[str, attrs.Attribute]: Dict of attribute name to definition + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + Args: + inst: Instance of a class with *attrs* attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + cls = base_attr_map.get(a_name) + return cls and "__slots__" in cls.__dict__ + + +def _make_init_script( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +) -> tuple[str, dict, dict]: + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + "__attrs_init__" if attrs_init else "__init__", + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _OBJ_SETATTR.__get__ + + return script, globs, annotations + + +def _setattr(attr_name: str, value_var: str, has_on_setattr: bool) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. 
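+
+    The converter expression itself is rendered as source text by
+    Converter._fmt_converter_call below.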
+ """ + return f"_setattr('{attr_name}', {converter._fmt_converter_call(attr_name, value_var)})" + + +def _assign(attr_name: str, value: str, has_on_setattr: bool) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True, converter) + + return f"self.{attr_name} = {converter._fmt_converter_call(attr_name, value_var)}" + + +def _determine_setters( + frozen: bool, slots: bool, base_attr_map: dict[str, type] +): + """ + Determine the correct setter functions based on whether a class is frozen + and/or slotted. + """ + if frozen is True: + if slots is True: + return (), _setattr, _setattr_with_converter + + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + + def fmt_setter( + attr_name: str, value_var: str, has_on_setattr: bool + ) -> str: + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name: str, + value_var: str, + has_on_setattr: bool, + converter: Converter, + ) -> str: + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr, converter + ) + + return f"_inst_dict['{attr_name}'] = {converter._fmt_converter_call(attr_name, value_var)}" + + return ( + ("_inst_dict = self.__dict__",), + fmt_setter, + fmt_setter_with_converter, + ) + + # Not frozen -- we can just assign directly. + return (), _assign, _assign_with_converter + + +def _attrs_to_init_script( + attrs: list[Attribute], + is_frozen: bool, + is_slotted: bool, + call_pre_init: bool, + pre_init_has_args: bool, + call_post_init: bool, + does_cache_hash: bool, + base_attr_map: dict[str, type], + is_exc: bool, + needs_cached_setattr: bool, + has_cls_on_setattr: bool, + method_name: str, +) -> tuple[str, dict, dict]: + """ + Return a script of an initializer for *attrs*, a dict of globals, and + annotations for the initializer. + + The globals are required by the generated script. + """ + lines = ["self.__attrs_pre_init__()"] if call_pre_init else [] + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. Note _setattr will be used again below if + # does_cache_hash is True. + "_setattr = _cached_setattr_get(self)" + ) + + extra_lines, fmt_setter, fmt_setter_with_converter = _determine_setters( + is_frozen, is_slotted, base_attr_map + ) + lines.extend(extra_lines) + + args = [] # Parameters in the definition of __init__ + pre_init_args = [] # Parameters in the call to __attrs_pre_init__ + kw_only_args = [] # Used for both 'args' and 'pre_init_args' above + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. 
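+    # For example, the validator of a field `x` becomes available to the
+    # generated source as the global name `__attr_validator_x`.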
+ names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.converter is not None and not isinstance(a.converter, Converter): + converter = Converter(a.converter) + else: + converter = a.converter + + if a.init is False: + if has_factory: + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + pre_init_args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + pre_init_args.append(arg_name) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + pre_init_args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and converter is None: + annotations[arg_name] = a.type + elif converter is not None and 
converter._first_param_type: + # Use the type from the converter if present. + annotations[arg_name] = converter._first_param_type + + if attrs_to_validate: # we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if call_post_init: + lines.append("self.__attrs_post_init__()") + + # Because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to field + # values during post-init combined with post-init accessing the hash code + # would result in silent bugs. + if does_cache_hash: + if is_frozen: + if is_slotted: + init_hash_cache = f"_setattr('{_HASH_CACHE_FIELD}', None)" + else: + init_hash_cache = f"_inst_dict['{_HASH_CACHE_FIELD}'] = None" + else: + init_hash_cache = f"self.{_HASH_CACHE_FIELD} = None" + lines.append(init_hash_cache) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join(f"self.{a.name}" for a in attrs if a.init) + + lines.append(f"BaseException.__init__(self, {vals})") + + args = ", ".join(args) + pre_init_args = ", ".join(pre_init_args) + if kw_only_args: + # leading comma & kw_only args + args += f"{', ' if args else ''}*, {', '.join(kw_only_args)}" + pre_init_kw_only_args = ", ".join( + [ + f"{kw_arg_name}={kw_arg_name}" + # We need to remove the defaults from the kw_only_args. + for kw_arg_name in (kwa.split("=")[0] for kwa in kw_only_args) + ] + ) + pre_init_args += ", " if pre_init_args else "" + pre_init_args += pre_init_kw_only_args + + if call_pre_init and pre_init_has_args: + # If pre init method has arguments, pass the values given to __init__. + lines[0] = f"self.__attrs_pre_init__({pre_init_args})" + + # Python <3.12 doesn't allow backslashes in f-strings. + NL = "\n " + return ( + f"""def {method_name}(self, {args}): + {NL.join(lines) if lines else "pass"} +""", + names_for_globals, + annotations, + ) + + +def _default_init_alias_for(name: str) -> str: + """ + The default __init__ parameter name for a field. + + This performs private-name adjustment via leading-unscore stripping, + and is the default value of Attribute.alias if not provided. + """ + + return name.lstrip("_") + + +class Attribute: + """ + *Read-only* representation of an attribute. + + .. warning:: + + You should never instantiate this class yourself. + + The class has *all* arguments of `attr.ib` (except for ``factory`` which is + only syntactic sugar for ``default=Factory(...)`` plus the following: + + - ``name`` (`str`): The name of the attribute. + - ``alias`` (`str`): The __init__ parameter name of the attribute, after + any explicit overrides and default private-attribute-name handling. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The + callables that are used for comparing and ordering objects by this + attribute, respectively. These are set by passing a callable to + `attr.ib`'s ``eq``, ``order``, or ``cmp`` arguments. See also + :ref:`comparison customization `. 
+ + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. versionadded:: 22.2.0 *alias* + + For the full version history of the fields, see `attr.ib`. + """ + + # These slots must NOT be reordered because we use them later for + # instantiation. + __slots__ = ( # noqa: RUF023 + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + "alias", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + alias=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _OBJ_SETATTR.__get__(self) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _EMPTY_METADATA_SINGLETON + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) + + def __setattr__(self, name, value): + raise FrozenInstanceError + + @classmethod + def from_counting_attr( + cls, name: str, ca: _CountingAttr, kw_only: bool, type=None + ): + # The 'kw_only' argument is the class-level setting, and is used if the + # attribute itself does not explicitly set 'kw_only'. + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + msg = f"Type annotation and type argument cannot both be present for '{name}'." + raise ValueError(msg) + return cls( + name, + ca._default, + ca._validator, + ca.repr, + None, + ca.hash, + ca.init, + False, + ca.metadata, + type, + ca.converter, + kw_only if ca.kw_only is None else ca.kw_only, + ca.eq, + ca.eq_key, + ca.order, + ca.order_key, + ca.on_setattr, + ca.alias, + ) + + # Don't use attrs.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attrs.evolve` but that function does not work + with :class:`attrs.Attribute`. + + It is mainly meant to be used for `transform-fields`. + + .. 
versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _OBJ_SETATTR.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + ( + types.MappingProxyType(dict(value)) + if value + else _EMPTY_METADATA_SINGLETON + ), + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "_default", + "_validator", + "alias", + "converter", + "counter", + "eq", + "eq_key", + "hash", + "init", + "kw_only", + "metadata", + "on_setattr", + "order", + "order_key", + "repr", + "type", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. 
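+
+        A sketch of typical usage (class and attribute names are
+        illustrative)::
+
+            @attr.s
+            class C:
+                x = attr.ib()
+
+                @x.default
+                def _x_default(self):
+                    return 1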
+ + Raises: + DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class ClassProps: + """ + Effective class properties as derived from parameters to `attr.s()` or + `define()` decorators. + + This is the same data structure that *attrs* uses internally to decide how + to construct the final class. + + Warning: + + This feature is currently **experimental** and is not covered by our + strict backwards-compatibility guarantees. + + + Attributes: + is_exception (bool): + Whether the class is treated as an exception class. + + is_slotted (bool): + Whether the class is `slotted `. + + has_weakref_slot (bool): + Whether the class has a slot for weak references. + + is_frozen (bool): + Whether the class is frozen. + + kw_only (KeywordOnly): + Whether / how the class enforces keyword-only arguments on the + ``__init__`` method. + + collected_fields_by_mro (bool): + Whether the class fields were collected by method resolution order. + That is, correctly but unlike `dataclasses`. + + added_init (bool): + Whether the class has an *attrs*-generated ``__init__`` method. + + added_repr (bool): + Whether the class has an *attrs*-generated ``__repr__`` method. + + added_eq (bool): + Whether the class has *attrs*-generated equality methods. + + added_ordering (bool): + Whether the class has *attrs*-generated ordering methods. + + hashability (Hashability): How `hashable ` the class is. + + added_match_args (bool): + Whether the class supports positional `match ` over its + fields. + + added_str (bool): + Whether the class has an *attrs*-generated ``__str__`` method. + + added_pickling (bool): + Whether the class has *attrs*-generated ``__getstate__`` and + ``__setstate__`` methods for `pickle`. + + on_setattr_hook (Callable[[Any, Attribute[Any], Any], Any] | None): + The class's ``__setattr__`` hook. + + field_transformer (Callable[[Attribute[Any]], Attribute[Any]] | None): + The class's `field transformers `. + + .. versionadded:: 25.4.0 + """ + + class Hashability(enum.Enum): + """ + The hashability of a class. + + .. versionadded:: 25.4.0 + """ + + HASHABLE = "hashable" + """Write a ``__hash__``.""" + HASHABLE_CACHED = "hashable_cache" + """Write a ``__hash__`` and cache the hash.""" + UNHASHABLE = "unhashable" + """Set ``__hash__`` to ``None``.""" + LEAVE_ALONE = "leave_alone" + """Don't touch ``__hash__``.""" + + class KeywordOnly(enum.Enum): + """ + How attributes should be treated regarding keyword-only parameters. + + .. 
versionadded:: 25.4.0 + """ + + NO = "no" + """Attributes are not keyword-only.""" + YES = "yes" + """Attributes in current class without kw_only=False are keyword-only.""" + FORCE = "force" + """All attributes are keyword-only.""" + + __slots__ = ( # noqa: RUF023 -- order matters for __init__ + "is_exception", + "is_slotted", + "has_weakref_slot", + "is_frozen", + "kw_only", + "collected_fields_by_mro", + "added_init", + "added_repr", + "added_eq", + "added_ordering", + "hashability", + "added_match_args", + "added_str", + "added_pickling", + "on_setattr_hook", + "field_transformer", + ) + + def __init__( + self, + is_exception, + is_slotted, + has_weakref_slot, + is_frozen, + kw_only, + collected_fields_by_mro, + added_init, + added_repr, + added_eq, + added_ordering, + hashability, + added_match_args, + added_str, + added_pickling, + on_setattr_hook, + field_transformer, + ): + self.is_exception = is_exception + self.is_slotted = is_slotted + self.has_weakref_slot = has_weakref_slot + self.is_frozen = is_frozen + self.kw_only = kw_only + self.collected_fields_by_mro = collected_fields_by_mro + self.added_init = added_init + self.added_repr = added_repr + self.added_eq = added_eq + self.added_ordering = added_ordering + self.hashability = hashability + self.added_match_args = added_match_args + self.added_str = added_str + self.added_pickling = added_pickling + self.on_setattr_hook = on_setattr_hook + self.field_transformer = field_transformer + + @property + def is_hashable(self): + return ( + self.hashability is ClassProps.Hashability.HASHABLE + or self.hashability is ClassProps.Hashability.HASHABLE_CACHED + ) + + +_cas = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in ClassProps.__slots__ +] + +ClassProps = _add_eq(_add_repr(ClassProps, attrs=_cas), attrs=_cas) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + Args: + factory (typing.Callable): + A callable that takes either none or exactly one mandatory + positional argument depending on *takes_self*. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +class Converter: + """ + Stores a converter callable. + + Allows for the wrapped converter to take additional arguments. The + arguments are passed in the order they are documented. + + Args: + converter (Callable): A callable that converts the passed value. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. 
(default: `False`) + + takes_field (bool): + Pass the field definition (an :class:`Attribute`) into the + converter as a positional argument. (default: `False`) + + .. versionadded:: 24.1.0 + """ + + __slots__ = ( + "__call__", + "_first_param_type", + "_global_name", + "converter", + "takes_field", + "takes_self", + ) + + def __init__(self, converter, *, takes_self=False, takes_field=False): + self.converter = converter + self.takes_self = takes_self + self.takes_field = takes_field + + ex = _AnnotationExtractor(converter) + self._first_param_type = ex.get_first_param_type() + + if not (self.takes_self or self.takes_field): + self.__call__ = lambda value, _, __: self.converter(value) + elif self.takes_self and not self.takes_field: + self.__call__ = lambda value, instance, __: self.converter( + value, instance + ) + elif not self.takes_self and self.takes_field: + self.__call__ = lambda value, __, field: self.converter( + value, field + ) + else: + self.__call__ = lambda value, instance, field: self.converter( + value, instance, field + ) + + rt = ex.get_return_type() + if rt is not None: + self.__call__.__annotations__["return"] = rt + + @staticmethod + def _get_global_name(attr_name: str) -> str: + """ + Return the name that a converter for an attribute name *attr_name* + would have. + """ + return f"__attr_converter_{attr_name}" + + def _fmt_converter_call(self, attr_name: str, value_var: str) -> str: + """ + Return a string that calls the converter for an attribute name + *attr_name* and the value in variable named *value_var* according to + `self.takes_self` and `self.takes_field`. + """ + if not (self.takes_self or self.takes_field): + return f"{self._get_global_name(attr_name)}({value_var})" + + if self.takes_self and self.takes_field: + return f"{self._get_global_name(attr_name)}({value_var}, self, attr_dict['{attr_name}'])" + + if self.takes_self: + return f"{self._get_global_name(attr_name)}({value_var}, self)" + + return f"{self._get_global_name(attr_name)}({value_var}, attr_dict['{attr_name}'])" + + def __getstate__(self): + """ + Return a dict containing only converter and takes_self -- the rest gets + computed when loading. + """ + return { + "converter": self.converter, + "takes_self": self.takes_self, + "takes_field": self.takes_field, + } + + def __setstate__(self, state): + """ + Load instance from state. + """ + self.__init__(**state) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in ("converter", "takes_self", "takes_field") +] + +Converter = _add_hash( + _add_eq(_add_repr(Converter, attrs=_f), attrs=_f), attrs=_f +) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + .. note:: + + ``make_class()`` is a thin wrapper around `attr.s`, not `attrs.define` + which means that it doesn't come with some of the improved defaults. + + For example, if you want the same ``on_setattr`` behavior as in + `attrs.define`, you have to pass the hooks yourself: ``make_class(..., + on_setattr=setters.pipe(setters.convert, setters.validate)`` + + .. warning:: + + It is *your* duty to ensure that the class name and the attribute names + are valid identifiers. ``make_class()`` will *not* validate them for + you. + + Args: + name (str): The name for the new class. 
+ + attrs (list | dict): + A list of names or a dictionary of mappings of names to `attr.ib`\ + s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes + inside *attrs*. Otherwise the order of the definition of the + attributes is used. + + bases (tuple[type, ...]): Classes that the new class will subclass. + + class_body (dict): + An optional dictionary of class attributes for the new class. + + attributes_arguments: Passed unmodified to `attr.s`. + + Returns: + type: A new class with *attrs*. + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + .. versionchanged:: 25.2.0 Class names can now be unicode. + """ + # Class identifiers are converted into the normal form NFKC while parsing + name = unicodedata.normalize("NFKC", name) + + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." + raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + cls = _attrs(these=cls_dict, **attributes_arguments)(type_) + # Only add type annotations now or "_attrs()" will complain: + cls.__annotations__ = { + k: v.type for k, v in cls_dict.items() if v.type is not None + } + return cls + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, unsafe_hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. 
+ + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if they + have any. + + converters (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + return_instance = any(isinstance(c, Converter) for c in converters) + + if return_instance: + + def pipe_converter(val, inst, field): + for c in converters: + val = ( + c(val, inst, field) if isinstance(c, Converter) else c(val) + ) + + return val + + else: + + def pipe_converter(val): + for c in converters: + val = c(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = TypeVar("A") + pipe_converter.__annotations__.update({"val": A, "return": A}) + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + last = converters[-1] + if not PY_3_11_PLUS and isinstance(last, Converter): + last = last.__call__ + + # Get return type from last converter. + rt = _AnnotationExtractor(last).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + if return_instance: + return Converter(pipe_converter, takes_self=True, takes_field=True) + return pipe_converter diff --git a/py311/lib/python3.11/site-packages/attr/_next_gen.py b/py311/lib/python3.11/site-packages/attr/_next_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..4ccd0da2446dc126ce936b054581a527e247cabc --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_next_gen.py @@ -0,0 +1,674 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + _DEFAULT_ON_SETATTR, + NOTHING, + _frozen_setattrs, + attrib, + attrs, +) +from .exceptions import NotAnAttrsClassError, UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, + force_kw_only=False, +): + r""" + A class decorator that adds :term:`dunder methods` according to + :term:`fields ` specified using :doc:`type annotations `, + `field()` calls, or the *these* argument. + + Since *attrs* patches or replaces an existing class, you cannot use + `object.__init_subclass__` with *attrs* classes, because it runs too early. + As a replacement, you can define ``__attrs_init_subclass__`` on your class. + It will be called by *attrs* classes that subclass it after they're + created. See also :ref:`init-subclass`. + + Args: + slots (bool): + Create a :term:`slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, + so we encourage you to read the :term:`glossary entry `. 
+
+        auto_detect (bool):
+            Instead of setting the *init*, *repr*, *eq*, and *hash* arguments
+            explicitly, assume they are set to True **unless any** of the
+            involved methods for one of the arguments is implemented in the
+            *current* class (meaning, it is *not* inherited from some base
+            class).
+
+            So, for example by implementing ``__eq__`` on a class yourself,
+            *attrs* will deduce ``eq=False`` and will create *neither*
+            ``__eq__`` *nor* ``__ne__`` (but Python classes come with a
+            sensible ``__ne__`` by default, so it *should* be enough to only
+            implement ``__eq__`` in most cases).
+
+            Passing :data:`True` or :data:`False` to *init*, *repr*, *eq*, or
+            *hash* overrides whatever *auto_detect* would determine.
+
+        auto_exc (bool):
+            If the class subclasses `BaseException` (which implicitly includes
+            any subclass of any exception), the following happens to behave
+            like a well-behaved Python exception class:
+
+            - the values for *eq*, *order*, and *hash* are ignored and the
+              instances compare and hash by the instance's ids [#]_ ,
+            - all attributes that are either passed into ``__init__`` or have
+              a default value are additionally available as a tuple in the
+              ``args`` attribute,
+            - the value of *str* is ignored leaving ``__str__`` to base
+              classes.
+
+            .. [#]
+                Note that *attrs* will *not* remove existing implementations
+                of ``__hash__`` or the equality methods. It just won't add own
+                ones.
+
+        on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]):
+            A callable that is run whenever the user attempts to set an
+            attribute (either by assignment like ``i.x = 42`` or by using
+            `setattr` like ``setattr(i, "x", 42)``). It receives the same
+            arguments as validators: the instance, the attribute that is being
+            modified, and the new value.
+
+            If no exception is raised, the attribute is set to the return
+            value of the callable.
+
+            If a list of callables is passed, they're automatically wrapped in
+            an `attrs.setters.pipe`.
+
+            If left None, the default behavior is to run converters and
+            validators whenever an attribute is set.
+
+        init (bool):
+            Create a ``__init__`` method that initializes the *attrs*
+            attributes. Leading underscores are stripped for the argument
+            name, unless an alias is set on the attribute.
+
+            .. seealso::
+                `init` shows advanced ways to customize the generated
+                ``__init__`` method, including executing code before and
+                after.
+
+        repr (bool):
+            Create a ``__repr__`` method with a human readable representation
+            of *attrs* attributes.
+
+        str (bool):
+            Create a ``__str__`` method that is identical to ``__repr__``.
+            This is usually not necessary except for `Exception`\ s.
+
+        eq (bool | None):
+            If True or None (default), add ``__eq__`` and ``__ne__`` methods
+            that check two instances for equality.
+
+            .. seealso::
+                `comparison` describes how to customize the comparison
+                behavior, going as far as comparing NumPy arrays.
+
+        order (bool | None):
+            If True, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__``
+            methods that behave like *eq* above and allow instances to be
+            ordered.
+
+            They compare the instances as if they were tuples of their *attrs*
+            attributes if and only if the types of both classes are
+            *identical*.
+
+            If `None`, mirror the value of *eq*.
+
+            .. seealso:: `comparison`
+
+        unsafe_hash (bool | None):
+            If None (default), the ``__hash__`` method is generated according
+            to how *eq* and *frozen* are set.
+
+            1. If *both* are True, *attrs* will generate a ``__hash__`` for
+               you.
+            2. If *eq* is True and *frozen* is False, ``__hash__`` will be set
+               to None, marking it unhashable (which it is).
+            3. If *eq* is False, ``__hash__`` will be left untouched meaning
+               the ``__hash__`` method of the base class will be used. If the
+               base class is `object`, this means it will fall back to
+               id-based hashing.
+
+            Although not recommended, you can decide for yourself and force
+            *attrs* to create one (for example, if the class is immutable even
+            though you didn't freeze it programmatically) by passing True, or
+            prevent one from being created (for example, if you've redefined
+            equality without touching hashing) by passing False. Both of these
+            cases are rather special and should be used carefully.
+
+            .. seealso::
+
+                - Our documentation on `hashing`,
+                - Python's documentation on `object.__hash__`,
+                - and the `GitHub issue that led to the default behavior
+                  <https://github.com/python-attrs/attrs/issues/136>`_ for
+                  more details.
+
+        hash (bool | None):
+            Deprecated alias for *unsafe_hash*. *unsafe_hash* takes
+            precedence.
+
+        cache_hash (bool):
+            Ensure that the object's hash code is computed only once and
+            stored on the object. If this is set to True, hashing must be
+            either explicitly or implicitly enabled for this class. If the
+            hash code is cached, avoid any reassignments of fields involved in
+            hash code computation or mutations of the objects those fields
+            point to after object creation. If such changes occur, the
+            behavior of the object's hash code is undefined.
+
+        frozen (bool):
+            Make instances immutable after initialization. If someone attempts
+            to modify a frozen instance,
+            `attrs.exceptions.FrozenInstanceError` is raised.
+
+            .. note::
+
+                1. This is achieved by installing a custom ``__setattr__``
+                   method on your class, so you can't implement your own.
+
+                2. True immutability is impossible in Python.
+
+                3. This *does* have a minor runtime performance impact when
+                   initializing new instances. In other words: ``__init__`` is
+                   slightly slower with ``frozen=True``.
+
+                4. If a class is frozen, you cannot modify ``self`` in
+                   ``__attrs_post_init__`` or a self-written ``__init__``. You
+                   can circumvent that limitation by using
+                   ``object.__setattr__(self, "attribute_name", value)``.
+
+                5. Subclasses of a frozen class are frozen too.
+
+        kw_only (bool):
+            Make attributes keyword-only in the generated ``__init__`` (if
+            *init* is False, this parameter is ignored). Attributes that
+            explicitly set ``kw_only=False`` are not affected; base class
+            attributes are also not affected.
+
+            Also see *force_kw_only*.
+
+        weakref_slot (bool):
+            Make instances weak-referenceable. This has no effect unless
+            *slots* is True.
+
+        field_transformer (~typing.Callable | None):
+            A function that is called with the original class object and all
+            fields right before *attrs* finalizes the class. You can use this,
+            for example, to automatically add converters or validators to
+            fields based on their types.
+
+            .. seealso:: `transform-fields`
+
+        match_args (bool):
+            If True (default), set ``__match_args__`` on the class to support
+            :pep:`634` (*Structural Pattern Matching*). It is a tuple of all
+            non-keyword-only ``__init__`` parameter names on Python 3.10 and
+            later. Ignored on older Python versions.
+
+        collect_by_mro (bool):
+            If True, *attrs* collects attributes from base classes correctly
+            according to the `method resolution order
+            <https://docs.python.org/3/howto/mro.html>`_. If False, *attrs*
+            will mimic the (wrong) behavior of `dataclasses` and :pep:`681`.
+
+            See also `issue #428
+            <https://github.com/python-attrs/attrs/issues/428>`_.
+
+        force_kw_only (bool):
+            A back-compat flag for restoring pre-25.4.0 behavior.
If True and + ``kw_only=True``, all attributes are made keyword-only, including + base class attributes, and those set to ``kw_only=False`` at the + attribute level. Defaults to False. + + See also `issue #980 + `_. + + getstate_setstate (bool | None): + .. note:: + + This is usually only interesting for slotted classes and you + should probably just set *auto_detect* to True. + + If True, ``__getstate__`` and ``__setstate__`` are generated and + attached to the class. This is necessary for slotted classes to be + pickleable. If left None, it's True by default for slotted classes + and False for dict classes. + + If *auto_detect* is True, and *getstate_setstate* is left None, and + **either** ``__getstate__`` or ``__setstate__`` is detected + directly on the class (meaning: not inherited), it is set to False + (this is usually what you want). + + auto_attribs (bool | None): + If True, look at type annotations to determine which attributes to + use, like `dataclasses`. If False, it will only look for explicit + :func:`field` class attributes, like classic *attrs*. + + If left None, it will guess: + + 1. If any attributes are annotated and no unannotated + `attrs.field`\ s are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.field`\ s. + + If *attrs* decides to look at type annotations, **all** fields + **must** be annotated. If *attrs* encounters a field that is set to + a :func:`field` / `attr.ib` but lacks a type annotation, an + `attrs.exceptions.UnannotatedAttributeError` is raised. Use + ``field_name: typing.Any = field(...)`` if you don't want to set a + type. + + .. warning:: + + For features that use the attribute name to create decorators + (for example, :ref:`validators `), you still *must* + assign :func:`field` / `attr.ib` to them. Otherwise Python will + either not find the name or try to use the default value to + call, for example, ``validator`` on it. + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `field()` are **ignored**. + + these (dict[str, object]): + A dictionary of name to the (private) return value of `field()` + mappings. This is useful to avoid the definition of your attributes + within the class body because you can't (for example, if you want + to add ``__repr__`` methods to Django models) or don't want to. + + If *these* is not `None`, *attrs* will *not* search the class body + for attributes and will *not* remove any attributes from it. + + The order is deduced from the order of the attributes inside + *these*. + + Arguably, this is a rather obscure feature. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + .. versionchanged:: 24.1.0 + Instances are not compared as tuples of attributes anymore, but using a + big ``and`` condition. This is faster and has more correct behavior for + uncomparable values like `math.nan`. + .. versionadded:: 24.1.0 + If a class has an *inherited* classmethod called + ``__attrs_init_subclass__``, it is executed after the class is created. + .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*. + .. versionadded:: 24.3.0 + Unless already present, a ``__replace__`` method is automatically + created for `copy.replace` (Python 3.13+ only). + .. 
versionchanged:: 25.4.0 + *kw_only* now only applies to attributes defined in the current class, + and respects attribute-level ``kw_only=False`` settings. + .. versionadded:: 25.4.0 + Added *force_kw_only* to go back to the previous *kw_only* behavior. + + .. note:: + + The main differences to the classic `attr.s` are: + + - Automatically detect whether or not *auto_attribs* should be `True` + (c.f. *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- + if *frozen* is `False`. + - *slots=True* + + Usually, this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so + please make sure to read :term:`slotted classes`. + + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - *force_kw_only=False* + - Some options that were only relevant on Python 2 or were kept around + for backwards-compatibility have been removed. + + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + force_kw_only=force_kw_only, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _DEFAULT_ON_SETATTR + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." + raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=None, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new :term:`field` / :term:`attribute` on a class. + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attrs.define` (or similar)! + + Args: + default: + A value that is used if an *attrs*-generated ``__init__`` is used + and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will + be used to construct a new value (useful for mutable data types + like lists or dicts). 
+
+            If a default is not set (or set manually to `attrs.NOTHING`), a
+            value *must* be supplied when instantiating; otherwise a
+            `TypeError` will be raised.
+
+            .. seealso:: `defaults`
+
+        factory (~typing.Callable):
+            Syntactic sugar for ``default=attr.Factory(factory)``.
+
+        validator (~typing.Callable | list[~typing.Callable]):
+            Callable that is called by *attrs*-generated ``__init__`` methods
+            after the instance has been initialized. They receive the
+            initialized instance, the :func:`~attrs.Attribute`, and the passed
+            value.
+
+            The return value is *not* inspected so the validator has to throw
+            an exception itself.
+
+            If a `list` is passed, its items are treated as validators and
+            must all pass.
+
+            Validators can be globally disabled and re-enabled using
+            `attrs.validators.get_disabled` / `attrs.validators.set_disabled`.
+
+            The validator can also be set using decorator notation as shown
+            below.
+
+            .. seealso:: :ref:`validators`
+
+        repr (bool | ~typing.Callable):
+            Include this attribute in the generated ``__repr__`` method. If
+            True, include the attribute; if False, omit it. By default, the
+            built-in ``repr()`` function is used. To override how the
+            attribute value is formatted, pass a ``callable`` that takes a
+            single value and returns a string. Note that the resulting string
+            is used as-is, which means it will be used directly *instead* of
+            calling ``repr()`` (the default).
+
+        eq (bool | ~typing.Callable):
+            If True (default), include this attribute in the generated
+            ``__eq__`` and ``__ne__`` methods that check two instances for
+            equality. To override how the attribute value is compared, pass a
+            callable that takes a single value and returns the value to be
+            compared.
+
+            .. seealso:: `comparison`
+
+        order (bool | ~typing.Callable):
+            If True (default), include this attribute in the generated
+            ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To
+            override how the attribute value is ordered, pass a callable that
+            takes a single value and returns the value to be ordered.
+
+            .. seealso:: `comparison`
+
+        hash (bool | None):
+            Include this attribute in the generated ``__hash__`` method. If
+            None (default), mirror *eq*'s value. This is the correct behavior
+            according to the Python spec. Setting this value to anything else
+            than None is *discouraged*.
+
+            .. seealso:: `hashing`
+
+        init (bool):
+            Include this attribute in the generated ``__init__`` method.
+
+            It is possible to set this to False and set a default value. In
+            that case this attribute is unconditionally initialized with the
+            specified default value or factory.
+
+            .. seealso:: `init`
+
+        converter (typing.Callable | Converter):
+            A callable that is called by *attrs*-generated ``__init__``
+            methods to convert the attribute's value to the desired format.
+
+            If a vanilla callable is passed, it is given the passed-in value
+            as the only positional argument. It is possible to receive
+            additional arguments by wrapping the callable in a `Converter`.
+
+            Either way, the returned value will be used as the new value of
+            the attribute. The value is converted before being passed to the
+            validator, if any.
+
+            .. seealso:: :ref:`converters`
+
+        metadata (dict | None):
+            An arbitrary mapping, to be used by third-party code.
+
+            .. seealso:: `extending-metadata`.
+
+        type (type):
+            The type of the attribute. Nowadays, the preferred method to
+            specify the type is using a variable annotation (see :pep:`526`).
+            This argument is provided for backwards-compatibility and for
+            usage with `make_class`.
Regardless of the approach used, the type will + be stored on ``Attribute.type``. + + Please note that *attrs* doesn't do anything with this metadata by + itself. You can use it as part of your own code or for `static type + checking `. + + kw_only (bool | None): + Make this attribute keyword-only in the generated ``__init__`` (if + *init* is False, this parameter is ignored). If None (default), + mirror the setting from `attrs.define`. + + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + Allows to overwrite the *on_setattr* setting from `attr.s`. If left + None, the *on_setattr* value from `attr.s` is used. Set to + `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `define()`. + + alias (str | None): + Override this attribute's parameter name in the generated + ``__init__`` method. If left None, default to ``name`` stripped + of leading underscores. See `private-attributes`. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionadded:: 22.2.0 *alias* + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + .. versionchanged:: 25.4.0 + *kw_only* can now be None, and its default is also changed from False to + None. + + .. seealso:: + + `attr.ib` + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) + + +def inspect(cls): + """ + Inspect the class and return its effective build parameters. + + Warning: + This feature is currently **experimental** and is not covered by our + strict backwards-compatibility guarantees. + + Args: + cls: The *attrs*-decorated class to inspect. + + Returns: + The effective build parameters of the class. + + Raises: + NotAnAttrsClassError: If the class is not an *attrs*-decorated class. + + .. versionadded:: 25.4.0 + """ + try: + return cls.__dict__["__attrs_props__"] + except KeyError: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) from None diff --git a/py311/lib/python3.11/site-packages/attr/_typing_compat.pyi b/py311/lib/python3.11/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ca7b71e906a28f88726bbd342fdfe636af0281e7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. 
+MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/py311/lib/python3.11/site-packages/attr/_version_info.py b/py311/lib/python3.11/site-packages/attr/_version_info.py new file mode 100644 index 0000000000000000000000000000000000000000..27f18884ad4d7a42f338e31a8ea19bc5d19999cd --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_version_info.py @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them + + def __hash__(self): + return hash((self.year, self.minor, self.micro, self.releaselevel)) diff --git a/py311/lib/python3.11/site-packages/attr/_version_info.pyi b/py311/lib/python3.11/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000000000000000000000000000000000000..45ced086337783c4b73b26cd17d2c1c260e24029 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/py311/lib/python3.11/site-packages/attr/converters.py b/py311/lib/python3.11/site-packages/attr/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..0a79deef04282fb33a42f6aca59563d49e70d4cb --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/converters.py @@ -0,0 +1,162 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. 
+""" + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Converter, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to `None`. + + Type annotations will be inferred from the wrapped converter's, if it has + any. + + Args: + converter (typing.Callable): + the converter that is used for non-`None` values. + + .. versionadded:: 17.1.0 + """ + + if isinstance(converter, Converter): + + def optional_converter(val, inst, field): + if val is None: + return None + return converter(val, inst, field) + + else: + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + if isinstance(converter, Converter): + return Converter(optional_converter, takes_self=True, takes_field=True) + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace `None` values by *default* or the result + of *factory*. + + Args: + default: + Value to be used if `None` is passed. Passing an instance of + `attrs.Factory` is supported, however the ``takes_self`` option is + *not*. + + factory (typing.Callable): + A callable that takes no parameters whose result is used if `None` + is passed. + + Raises: + TypeError: If **neither** *default* or *factory* is passed. + + TypeError: If **both** *default* and *factory* are passed. + + ValueError: + If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + msg = "Must pass either `default` or `factory`." + raise TypeError(msg) + + if default is not NOTHING and factory is not None: + msg = "Must pass either `default` or `factory` but not both." + raise TypeError(msg) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + msg = "`takes_self` is not supported by default_if_none." + raise ValueError(msg) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (for example, from environment variables) to real + booleans. + + Values mapping to `True`: + + - ``True`` + - ``"true"`` / ``"t"`` + - ``"yes"`` / ``"y"`` + - ``"on"`` + - ``"1"`` + - ``1`` + + Values mapping to `False`: + + - ``False`` + - ``"false"`` / ``"f"`` + - ``"no"`` / ``"n"`` + - ``"off"`` + - ``"0"`` + - ``0`` + + Raises: + ValueError: For any other value. + + .. 
versionadded:: 21.3.0
+    """
+    if isinstance(val, str):
+        val = val.lower()
+
+    if val in (True, "true", "t", "yes", "y", "on", "1", 1):
+        return True
+    if val in (False, "false", "f", "no", "n", "off", "0", 0):
+        return False
+
+    msg = f"Cannot convert value to bool: {val!r}"
+    raise ValueError(msg)
diff --git a/py311/lib/python3.11/site-packages/attr/converters.pyi b/py311/lib/python3.11/site-packages/attr/converters.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..12bd0c4f17bdc60fb8904598af0a3d56d5874a9e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/attr/converters.pyi
@@ -0,0 +1,19 @@
+from typing import Callable, Any, overload
+
+from attrs import _ConverterType, _CallableConverterType
+
+@overload
+def pipe(*validators: _CallableConverterType) -> _CallableConverterType: ...
+@overload
+def pipe(*validators: _ConverterType) -> _ConverterType: ...
+@overload
+def optional(converter: _CallableConverterType) -> _CallableConverterType: ...
+@overload
+def optional(converter: _ConverterType) -> _ConverterType: ...
+@overload
+def default_if_none(default: Any) -> _CallableConverterType: ...
+@overload
+def default_if_none(
+    *, factory: Callable[[], Any]
+) -> _CallableConverterType: ...
+def to_bool(val: str | int | bool) -> bool: ...
diff --git a/py311/lib/python3.11/site-packages/attr/exceptions.py b/py311/lib/python3.11/site-packages/attr/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7abb8154108aa1d0ae52fa9ee8e489f05b5563
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/attr/exceptions.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+
+from __future__ import annotations
+
+from typing import ClassVar
+
+
+class FrozenError(AttributeError):
+    """
+    A frozen/immutable instance or attribute has been attempted to be
+    modified.
+
+    It mirrors the behavior of ``namedtuples`` by using the same error message
+    and subclassing `AttributeError`.
+
+    .. versionadded:: 20.1.0
+    """
+
+    msg = "can't set attribute"
+    # A one-element tuple, matching the ClassVar annotation above.
+    args: ClassVar[tuple[str]] = (msg,)
+
+
+class FrozenInstanceError(FrozenError):
+    """
+    A frozen instance has been attempted to be modified.
+
+    .. versionadded:: 16.1.0
+    """
+
+
+class FrozenAttributeError(FrozenError):
+    """
+    A frozen attribute has been attempted to be modified.
+
+    .. versionadded:: 20.1.0
+    """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+    """
+    An *attrs* function couldn't find an attribute that the user asked for.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class NotAnAttrsClassError(ValueError):
+    """
+    A non-*attrs* class has been passed into an *attrs* function.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class DefaultAlreadySetError(RuntimeError):
+    """
+    A default has been set when defining the field and an attempt was made to
+    reset it using the decorator.
+
+    .. versionadded:: 17.1.0
+    """
+
+
+class UnannotatedAttributeError(RuntimeError):
+    """
+    A class with ``auto_attribs=True`` has a field without a type annotation.
+
+    .. versionadded:: 17.3.0
+    """
+
+
+class PythonTooOldError(RuntimeError):
+    """
+    An *attrs* feature that requires a newer Python version was used.
+
+    .. versionadded:: 18.2.0
+    """
+
+
+class NotCallableError(TypeError):
+    """
+    A field requiring a callable has been set with a value that is not
+    callable.
+
+    ..
versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/py311/lib/python3.11/site-packages/attr/exceptions.pyi b/py311/lib/python3.11/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f2680118b404db8f5227d04d27e8439331341c4d --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/py311/lib/python3.11/site-packages/attr/filters.py b/py311/lib/python3.11/site-packages/attr/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..689b1705a60ff110d6077bab996f8b4588e55b82 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/filters.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attrs.asdict` and `attrs.astuple`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Create a filter that only allows *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to include. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Create a filter that does **not** allow *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to exclude. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/py311/lib/python3.11/site-packages/attr/filters.pyi b/py311/lib/python3.11/site-packages/attr/filters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..974abdcdb51152393d9c9e460c21aa025c45880c --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any + +from . import Attribute, _FilterType + +def include(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... +def exclude(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... 
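A minimal usage sketch of the two filters above, paired with `asdict` (the `User` class and its field names are hypothetical illustrations; `include`, `exclude`, and `asdict` are the APIs from this package):

```python
from attrs import asdict, define, filters

@define
class User:
    name: str
    password: str
    age: int

u = User("jane", "s3cret", 30)

# Exclude by field name: drop the password from the serialized dict.
print(asdict(u, filter=filters.exclude("password")))
# {'name': 'jane', 'age': 30}

# Include by value type: keep only attributes whose value is a str.
print(asdict(u, filter=filters.include(str)))
# {'name': 'jane', 'password': 's3cret'}
```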
diff --git a/py311/lib/python3.11/site-packages/attr/py.typed b/py311/lib/python3.11/site-packages/attr/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/attr/setters.py b/py311/lib/python3.11/site-packages/attr/setters.py new file mode 100644 index 0000000000000000000000000000000000000000..78b08398a6713fc5fa827c2dc853e0d05de743c4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/setters.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + # This can be removed once we drop 3.8 and use attrs.Converter instead. + from ._make import Converter + + if not isinstance(c, Converter): + return c(new_value) + + return c(new_value, instance, attrib) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# Sphinx's autodata stopped working, so the docstring is inlined in the API +# docs. +NO_OP = object() diff --git a/py311/lib/python3.11/site-packages/attr/setters.pyi b/py311/lib/python3.11/site-packages/attr/setters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..73abf36e7d5b0f5f56e7fddeee716824c1c60d58 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/setters.pyi @@ -0,0 +1,20 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute +from attrs import _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/py311/lib/python3.11/site-packages/attr/validators.py b/py311/lib/python3.11/site-packages/attr/validators.py new file mode 100644 index 0000000000000000000000000000000000000000..837e003b6c2e5ce485d0b1cd19bfa398f18a8d18 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/validators.py @@ -0,0 +1,748 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. 
+""" + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "or_", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + Args: + disabled (bool): If `True`, disable running all validators. + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + Returns: + bool:`True` if validators are currently disabled. + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InstanceOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + msg = f"'{attr.name}' must be {self.type!r} (got {value!r} that is a {value.__class__!r})." + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"" + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called with a + wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + Args: + type (type | tuple[type]): The type to check for. + + Raises: + TypeError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected type, and the value it got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator: + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + msg = f"'{attr.name}' must match regex {self.pattern.pattern!r} ({value!r} doesn't)" + raise ValueError( + msg, + attr, + self.pattern, + value, + ) + + def __repr__(self): + return f"" + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called with a + string that doesn't match *regex*. + + Args: + regex (str, re.Pattern): + A regex string or precompiled pattern to match against + + flags (int): + Flags that will be passed to the underlying re function (default 0) + + func (typing.Callable): + Which underlying `re` function to call. Valid options are + `re.fullmatch`, `re.search`, and `re.match`; the default `None` + means `re.fullmatch`. For performance reasons, the pattern is + always precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. 
+ """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + msg = "'func' must be one of {}.".format( + ", ".join( + sorted((e and e.__name__) or "None" for e in set(valid_funcs)) + ) + ) + raise ValueError(msg) + + if isinstance(regex, Pattern): + if flags: + msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead" + raise TypeError(msg) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return f"" + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to `None` in addition to satisfying the requirements of + the sub-validator. + + Args: + validator + (typing.Callable | tuple[typing.Callable] | list[typing.Callable]): + A validator (or validators) that is used for non-`None` values. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. + """ + if isinstance(validator, (list, tuple)): + return _OptionalValidator(_AndValidator(validator)) + + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InValidator: + options = attrib() + _original_options = attrib(hash=False) + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + msg = f"'{attr.name}' must be in {self._original_options!r} (got {value!r})" + raise ValueError( + msg, + attr, + self._original_options, + value, + ) + + def __repr__(self): + return f"" + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called with a + value that does not belong in the *options* provided. + + The check is performed using ``value in options``, so *options* has to + support that operation. + + To keep the validator hashable, dicts, lists, and sets are transparently + transformed into a `tuple`. + + Args: + options: Allowed options. + + Raises: + ValueError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected options, and the value it got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + .. versionchanged:: 24.1.0 + *options* that are a list, dict, or a set are now transformed into a + tuple to keep the validator hashable. + """ + repr_options = options + if isinstance(options, (list, dict, set)): + options = tuple(options) + + return _InValidator(options, repr_options) + + +@attrs(repr=False, slots=False, unsafe_hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." 
+ ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "" + + +def is_callable(): + """ + A validator that raises a `attrs.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute that is + not callable. + + .. versionadded:: 19.1.0 + + Raises: + attrs.exceptions.NotCallableError: + With a human readable error message containing the attribute + (`attrs.Attribute`) name, and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else f" {self.iterable_validator!r}" + ) + return ( + f"" + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + Args: + member_validator: Validator(s) to apply to iterable members. + + iterable_validator: + Validator(s) to apply to iterable itself (optional). + + Raises + TypeError: if any sub-validators fail + + .. versionadded:: 19.1.0 + + .. versionchanged:: 25.4.0 + *member_validator* and *iterable_validator* can now be a list or tuple + of validators. + """ + if isinstance(member_validator, (list, tuple)): + member_validator = and_(*member_validator) + if isinstance(iterable_validator, (list, tuple)): + iterable_validator = and_(*iterable_validator) + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _DeepMapping: + key_validator = attrib(validator=optional(is_callable())) + value_validator = attrib(validator=optional(is_callable())) + mapping_validator = attrib(validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + if self.key_validator is not None: + self.key_validator(inst, attr, key) + if self.value_validator is not None: + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return f"" + + +def deep_mapping( + key_validator=None, value_validator=None, mapping_validator=None +): + """ + A validator that performs deep validation of a dictionary. + + All validators are optional, but at least one of *key_validator* or + *value_validator* must be provided. + + Args: + key_validator: Validator(s) to apply to dictionary keys. + + value_validator: Validator(s) to apply to dictionary values. + + mapping_validator: + Validator(s) to apply to top-level mapping attribute. + + .. versionadded:: 19.1.0 + + .. versionchanged:: 25.4.0 + *key_validator* and *value_validator* are now optional, but at least one + of them must be provided. + + .. versionchanged:: 25.4.0 + *key_validator*, *value_validator*, and *mapping_validator* can now be a + list or tuple of validators. + + Raises: + TypeError: If any sub-validator fails on validation. 
+ + ValueError: + If neither *key_validator* nor *value_validator* is provided on + instantiation. + """ + if key_validator is None and value_validator is None: + msg = ( + "At least one of key_validator or value_validator must be provided" + ) + raise ValueError(msg) + + if isinstance(key_validator, (list, tuple)): + key_validator = and_(*key_validator) + if isinstance(value_validator, (list, tuple)): + value_validator = and_(*value_validator) + if isinstance(mapping_validator, (list, tuple)): + mapping_validator = and_(*mapping_validator) + + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator: + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.compare_func(value, self.bound): + msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number larger or equal to *val*. + + The validator uses `operator.lt` to compare the values. + + Args: + val: Exclusive upper bound for values. + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number greater than *val*. + + The validator uses `operator.le` to compare the values. + + Args: + val: Inclusive upper bound for values. + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number smaller than *val*. + + The validator uses `operator.ge` to compare the values. + + Args: + val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number smaller or equal to *val*. + + The validator uses `operator.gt` to compare the values. + + Args: + val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator: + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) > self.max_length: + msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + Args: + length (int): Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if len(value) < self.min_length: + msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + Args: + length (int): Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _SubclassOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not issubclass(value, self.type): + msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})." + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"" + + +def _subclass_of(type): + """ + A validator that raises a `TypeError` if the initializer is called with a + wrong type for this particular attribute (checks are performed using + `issubclass` therefore it's also valid to pass a tuple of types). + + Args: + type (type | tuple[type, ...]): The type(s) to check for. + + Raises: + TypeError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected type, and the value it got. + """ + return _SubclassOfValidator(type) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _NotValidator: + validator = attrib() + msg = attrib( + converter=default_if_none( + "not_ validator child '{validator!r}' " + "did not raise a captured error" + ) + ) + exc_types = attrib( + validator=deep_iterable( + member_validator=_subclass_of(Exception), + iterable_validator=instance_of(tuple), + ), + ) + + def __call__(self, inst, attr, value): + try: + self.validator(inst, attr, value) + except self.exc_types: + pass # suppress error to invert validity + else: + raise ValueError( + self.msg.format( + validator=self.validator, + exc_types=self.exc_types, + ), + attr, + self.validator, + value, + self.exc_types, + ) + + def __repr__(self): + return f"" + + +def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)): + """ + A validator that wraps and logically 'inverts' the validator passed to it. + It will raise a `ValueError` if the provided validator *doesn't* raise a + `ValueError` or `TypeError` (by default), and will suppress the exception + if the provided validator *does*. + + Intended to be used with existing validators to compose logic without + needing to create inverted variants, for example, ``not_(in_(...))``. + + Args: + validator: A validator to be logically inverted. + + msg (str): + Message to raise if validator fails. Formatted with keys + ``exc_types`` and ``validator``. + + exc_types (tuple[type, ...]): + Exception type(s) to capture. Other types raised by child + validators will not be intercepted and pass through. + + Raises: + ValueError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the validator that failed to raise an + exception, the value it got, and the expected exception types. + + .. 
versionadded:: 22.2.0 + """ + try: + exc_types = tuple(exc_types) + except TypeError: + exc_types = (exc_types,) + return _NotValidator(validator, msg, exc_types) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _OrValidator: + validators = attrib() + + def __call__(self, inst, attr, value): + for v in self.validators: + try: + v(inst, attr, value) + except Exception: # noqa: BLE001, PERF203, S112 + continue + else: + return + + msg = f"None of {self.validators!r} satisfied for value {value!r}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def or_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators until one of them is + satisfied. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + Raises: + ValueError: + If no validator is satisfied. Raised with a human-readable error + message listing all the wrapped validators and the value that + failed all of them. + + .. versionadded:: 24.1.0 + """ + vals = [] + for v in validators: + vals.extend(v.validators if isinstance(v, _OrValidator) else [v]) + + return _OrValidator(tuple(vals)) diff --git a/py311/lib/python3.11/site-packages/attr/validators.pyi b/py311/lib/python3.11/site-packages/attr/validators.pyi new file mode 100644 index 0000000000000000000000000000000000000000..36a7e800c237f46755ddfca306f8d867a7f493db --- /dev/null +++ b/py311/lib/python3.11/site-packages/attr/validators.pyi @@ -0,0 +1,140 @@ +from types import UnionType +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + Mapping, + Match, + Pattern, + TypeVar, + overload, +) + +from attrs import _ValidatorType +from attrs import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_T4 = TypeVar("_T4") +_T5 = TypeVar("_T5") +_T6 = TypeVar("_T6") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: tuple[type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2]], +) -> _ValidatorType[_T1 | _T2]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2], type[_T3]], +) -> _ValidatorType[_T1 | _T2 | _T3]: ... +@overload +def instance_of(type: tuple[type, ...]) -> _ValidatorType[Any]: ... +@overload +def instance_of(type: UnionType) -> _ValidatorType[Any]: ... +def optional( + validator: ( + _ValidatorType[_T] + | list[_ValidatorType[_T]] + | tuple[_ValidatorType[_T]] + ), +) -> _ValidatorType[_T | None]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Pattern[AnyStr] | AnyStr, + flags: int = ..., + func: Callable[[AnyStr, AnyStr, int], Match[AnyStr] | None] | None = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: _ValidatorArgType[_I] | None = ..., +) -> _ValidatorType[_I]: ... 
+@overload +def deep_mapping( + key_validator: _ValidatorArgType[_K], + value_validator: _ValidatorArgType[_V] | None = ..., + mapping_validator: _ValidatorArgType[_M] | None = ..., +) -> _ValidatorType[_M]: ... +@overload +def deep_mapping( + key_validator: _ValidatorArgType[_K] | None = ..., + value_validator: _ValidatorArgType[_V] = ..., + mapping_validator: _ValidatorArgType[_M] | None = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: str | None = None, + exc_types: type[Exception] | Iterable[type[Exception]] = ..., +) -> _ValidatorType[_T]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], +) -> _ValidatorType[_T1 | _T2]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], +) -> _ValidatorType[_T1 | _T2 | _T3]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], + __v5: _ValidatorType[_T5], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4 | _T5]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], + __v5: _ValidatorType[_T5], + __v6: _ValidatorType[_T6], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4 | _T5 | _T6]: ... +@overload +def or_( + __v1: _ValidatorType[Any], + __v2: _ValidatorType[Any], + __v3: _ValidatorType[Any], + __v4: _ValidatorType[Any], + __v5: _ValidatorType[Any], + __v6: _ValidatorType[Any], + *validators: _ValidatorType[Any], +) -> _ValidatorType[Any]: ... 
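A short sketch of how the validators defined above compose on a class (the `Connection` class and its fields are hypothetical; `instance_of`, `gt`, `le`, `in_`, and `optional` are the functions from `attr/validators.py`):

```python
from attrs import define, field
from attrs import validators as v

@define
class Connection:
    # A list of validators is wrapped in and_(): every one must pass.
    port: int = field(validator=[v.instance_of(int), v.gt(0), v.le(65535)])
    # in_() accepts any container; lists, dicts, and sets are made hashable.
    scheme: str = field(validator=v.in_({"http", "https"}))
    # optional() additionally allows None.
    proxy: str | None = field(default=None, validator=v.optional(v.instance_of(str)))

Connection(port=443, scheme="https")    # passes all validators
# Connection(port=0, scheme="https")    # ValueError from gt(0)
# Connection(port=443, scheme="ftp")    # ValueError from in_(...)
```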
diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..51128bb96f0b8fa07656422e8099343cfdadd4cd --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA @@ -0,0 +1,235 @@ +Metadata-Version: 2.4 +Name: attrs +Version: 25.4.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.9 +Description-Content-Type: text/markdown + +

+
+# attrs
+

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +Trusted by NASA for [Mars missions since 2020](https://github.com/readme/featured/nasa-ingenuity-helicopter)! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + + + +

+

+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation! + + +### Hate Type Annotations!? + +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types: + +```python +from attrs import define, field + +@define +class SomeClass: + a_number = field(default=42) + list_of_numbers = field(factory=list) +``` + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice. 
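+
+For instance, the NumPy equality handling mentioned above comes down to a single `field()` argument.
+A minimal sketch, assuming NumPy is installed (`Histogram` is an illustrative class, not part of *attrs*):
+
+```python
+import numpy as np
+import attrs
+
+@attrs.define
+class Histogram:
+    # Compare the array field by value; a bare `==` on ndarrays returns
+    # an array and would break the generated __eq__.
+    counts: np.ndarray = attrs.field(eq=attrs.cmp_using(eq=np.array_equal))
+
+Histogram(np.arange(3)) == Histogram(np.arange(3))  # True
+```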
+
+
+## Project Information
+
+- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
+- [**Documentation**](https://www.attrs.org/)
+- [**PyPI**](https://pypi.org/project/attrs/)
+- [**Source Code**](https://github.com/python-attrs/attrs)
+- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
+- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
+- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs)
+
+
+### *attrs* for Enterprise
+
+Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).
+
+The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.
+
+## Release Information
+
+### Backwards-incompatible Changes
+
+- Class-level `kw_only=True` behavior is now consistent with `dataclasses`.
+
+  Previously, a class that set `kw_only=True` made all attributes keyword-only, including those inherited from base classes.
+  If an attribute set `kw_only=False`, that setting was ignored, and the attribute was still made keyword-only.
+
+  Now, only the attributes defined in that class that don't explicitly set `kw_only=False` are made keyword-only.
+
+  This shouldn't be a problem for most users, unless you have a pattern like this:
+
+  ```python
+  @attrs.define(kw_only=True)
+  class Base:
+      a: int
+      b: int = attrs.field(default=1, kw_only=False)
+
+  @attrs.define
+  class Subclass(Base):
+      c: int
+  ```
+
+  Here, we have a `kw_only=True` *attrs* class (`Base`) with an attribute that sets `kw_only=False` and has a default (`Base.b`), and then create a subclass (`Subclass`) with a required argument (`Subclass.c`).
+  Previously this would work, since it would make `Base.b` keyword-only, but now this fails since `Base.b` is positional, and we have a required positional argument (`Subclass.c`) following another argument with a default.
+  [#1457](https://github.com/python-attrs/attrs/issues/1457)
+
+
+### Changes
+
+- Values passed to the `__init__()` method of `attrs` classes are now correctly passed to `__attrs_pre_init__()` instead of their default values (in cases where *kw_only* was not specified).
+  [#1427](https://github.com/python-attrs/attrs/issues/1427)
+- Added support for Python 3.14 and [PEP 749](https://peps.python.org/pep-0749/).
+  [#1446](https://github.com/python-attrs/attrs/issues/1446),
+  [#1451](https://github.com/python-attrs/attrs/issues/1451)
+- `attrs.validators.deep_mapping()` now allows leaving out either *key_validator* or *value_validator* (at least one must be given).
+  [#1448](https://github.com/python-attrs/attrs/issues/1448)
+- `attrs.validators.deep_iterable()` and `attrs.validators.deep_mapping()` now accept lists and tuples for all validators and wrap them in an `attrs.validators.and_()`.
+  [#1449](https://github.com/python-attrs/attrs/issues/1449)
+- Added a new **experimental** way to inspect classes:
+
+  `attrs.inspect(cls)` returns the _effective_ class-wide parameters that were used by *attrs* to construct the class.
+
+  The returned object is the same data structure that *attrs* uses internally to decide how to construct the final class.
+ [#1454](https://github.com/python-attrs/attrs/issues/1454) +- Fixed annotations for `attrs.field(converter=...)`. + Previously, a `tuple` of converters was only accepted if it had exactly one element. + [#1461](https://github.com/python-attrs/attrs/issues/1461) +- The performance of `attrs.asdict()` has been improved by 45–260%. + [#1463](https://github.com/python-attrs/attrs/issues/1463) +- The performance of `attrs.astuple()` has been improved by 49–270%. + [#1469](https://github.com/python-attrs/attrs/issues/1469) +- The type annotation for `attrs.validators.or_()` now allows for different types of validators. + + This was only an issue on Pyright. + [#1474](https://github.com/python-attrs/attrs/issues/1474) + + + +--- + +[Full changelog →](https://www.attrs.org/en/stable/changelog.html) diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1a6aed1657e9b79ba62e923da5fface8a24be5e2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD @@ -0,0 +1,37 @@ +attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057 +attr/__init__.pyi,sha256=IZkzIjvtbRqDWGkDBIF9dd12FgDa379JYq3GHnVOvFQ,11309 +attr/_cmp.py,sha256=3Nn1TjxllUYiX_nJoVnEkXoDk0hM1DYKj5DE7GZe4i0,4117 +attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368 +attr/_compat.py,sha256=x0g7iEUOnBVJC72zyFCgb1eKqyxS-7f2LGnNyZ_r95s,2829 +attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843 +attr/_funcs.py,sha256=Ix5IETTfz5F01F-12MF_CSFomIn2h8b67EVVz2gCtBE,16479 +attr/_make.py,sha256=NRJDGS8syg2h3YNflVNoK2FwR3CpdSZxx8M6lacwljA,104141 +attr/_next_gen.py,sha256=BQtCUlzwg2gWHTYXBQvrEYBnzBUrDvO57u0Py6UCPhc,26274 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=w4R-FYC3NK_kMkGUWJlYP4cVAlH9HRaC-um3fcjYkHM,2222 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861 +attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795 +attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617 +attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584 +attr/validators.py,sha256=1BnYGTuYvSucGEI4ju-RPNJteVzG0ZlfWpJiWoSFHQ8,21458 +attr/validators.pyi,sha256=ftmW3m4KJ3pQcIXAj-BejT7BY4ZfqrC1G-5W7XvoPds,4082 +attrs-25.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +attrs-25.4.0.dist-info/METADATA,sha256=2Rerxj7agcMRxiwdkt6lC2guqHAmkGKCH13nWWK7ZoQ,10473 +attrs-25.4.0.dist-info/RECORD,, +attrs-25.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs-25.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +attrs-25.4.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=RxaAZNwYiEh-fcvHLZNpQ_DWKni73M_jxEPEftiq1Zc,1183 +attrs/__init__.pyi,sha256=2gV79g9UxJppGSM48hAZJ6h_MHb70dZoJL31ZNJeZYI,9416 
+attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..12228d414b6cfed7c39d3781c85c63256a1d7fb5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2bd6453d255e19b973f19b128596a8b6dd65b2c3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/attrs/__init__.py b/py311/lib/python3.11/site-packages/attrs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dc1ce4b974ea1b3971ced44e3e112e897cf29b0a --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/__init__.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Converter, + Factory, + NothingType, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._make import ClassProps +from attr._next_gen import asdict, astuple, inspect + +from . 
import converters, exceptions, filters, setters, validators + + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "ClassProps", + "Converter", + "Factory", + "NothingType", + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "has", + "inspect", + "make_class", + "mutable", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/py311/lib/python3.11/site-packages/attrs/__init__.pyi b/py311/lib/python3.11/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6364bac4ea29f860934803e5f3ba741812c6dfd1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/__init__.pyi @@ -0,0 +1,314 @@ +import sys + +from typing import ( + Any, + Callable, + Mapping, + Sequence, + overload, + TypeVar, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. +from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import Converter as Converter +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import has as has +from attr import make_class as make_class +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators +from attr import attrib, asdict as asdict, astuple as astuple +from attr import NothingType as NothingType + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = bool | Callable[[Any], Any] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_CallableConverterType = Callable[[Any], Any] +_ConverterType = _CallableConverterType | Converter[Any, Any] +_ReprType = Callable[[Any], str] +_ReprArgType = bool | _ReprType +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = _OnSetAttrType | list[_OnSetAttrType] | setters._NoOpType +_FieldTransformer = Callable[ + [type, list["Attribute[Any]"]], list["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. 
+_ValidatorArgType = _ValidatorType[_T] | Sequence[_ValidatorType[_T]] + +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool | None = ..., + eq: bool | None = ..., + order: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... 
+@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +class ClassProps: + # XXX: somehow when defining/using enums Mypy starts looking at our own + # (untyped) code and causes tons of errors. + Hashability: Any + KeywordOnly: Any + + is_exception: bool + is_slotted: bool + has_weakref_slot: bool + is_frozen: bool + # kw_only: ClassProps.KeywordOnly + kw_only: Any + collected_fields_by_mro: bool + added_init: bool + added_repr: bool + added_eq: bool + added_ordering: bool + # hashability: ClassProps.Hashability + hashability: Any + added_match_args: bool + added_str: bool + added_pickling: bool + on_setattr_hook: _OnSetAttrType | None + field_transformer: Callable[[Attribute[Any]], Attribute[Any]] | None + + def __init__( + self, + is_exception: bool, + is_slotted: bool, + has_weakref_slot: bool, + is_frozen: bool, + # kw_only: ClassProps.KeywordOnly + kw_only: Any, + collected_fields_by_mro: bool, + added_init: bool, + added_repr: bool, + added_eq: bool, + added_ordering: bool, + # hashability: ClassProps.Hashability + hashability: Any, + added_match_args: bool, + added_str: bool, + added_pickling: bool, + on_setattr_hook: _OnSetAttrType, + field_transformer: Callable[[Attribute[Any]], Attribute[Any]], + ) -> None: ... + @property + def is_hashable(self) -> bool: ... + +def inspect(cls: type) -> ClassProps: ... 
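+
+# Usage sketch (illustrative, not part of the upstream stubs).  inspect()
+# is experimental per the 25.4.0 changelog above; it returns the effective
+# class-wide parameters as a ClassProps instance:
+#
+#     import attrs
+#
+#     @attrs.define
+#     class Point:
+#         x: int
+#         y: int
+#
+#     props = attrs.inspect(Point)
+#     props.is_slotted   # True  -- @define builds slotted classes
+#     props.is_frozen    # False -- use @frozen for frozen classes
+#     props.is_hashable  # derived from the hashability settings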
diff --git a/py311/lib/python3.11/site-packages/attrs/converters.py b/py311/lib/python3.11/site-packages/attrs/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..7821f6c02cca81277d1ecc87b6bdafad886d8b70 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa: F403 diff --git a/py311/lib/python3.11/site-packages/attrs/exceptions.py b/py311/lib/python3.11/site-packages/attrs/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3323f9d2112c54b203763d45b455bd5abbe020f6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/py311/lib/python3.11/site-packages/attrs/filters.py b/py311/lib/python3.11/site-packages/attrs/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..3080f48398e5ed8d3428ca3efeb7500633b0cb0f --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa: F403 diff --git a/py311/lib/python3.11/site-packages/attrs/py.typed b/py311/lib/python3.11/site-packages/attrs/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/attrs/setters.py b/py311/lib/python3.11/site-packages/attrs/setters.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d73bb793dd49c138950961f41943bb26c57fde --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa: F403 diff --git a/py311/lib/python3.11/site-packages/attrs/validators.py b/py311/lib/python3.11/site-packages/attrs/validators.py new file mode 100644 index 0000000000000000000000000000000000000000..037e124f29f32d37c1642d159bf828de44f7c349 --- /dev/null +++ b/py311/lib/python3.11/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa: F403 diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/LICENSE b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9dc3cea50bac35595e74acd86a29fcd69bb336e0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 litl, LLC. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..adcb9c7ca4b5f4641f537fd925e714c8ab8bb20c --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/METADATA @@ -0,0 +1,419 @@ +Metadata-Version: 2.1 +Name: backoff +Version: 2.2.1 +Summary: Function decoration for backoff and retry +Home-page: https://github.com/litl/backoff +License: MIT +Keywords: retry,backoff,decorators +Author: Bob Green +Author-email: rgreen@aquent.com +Requires-Python: >=3.7,<4.0 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Project-URL: Repository, https://github.com/litl/backoff +Description-Content-Type: text/x-rst + +backoff +======= + +.. image:: https://travis-ci.org/litl/backoff.svg + :target: https://travis-ci.org/litl/backoff +.. image:: https://coveralls.io/repos/litl/backoff/badge.svg + :target: https://coveralls.io/r/litl/backoff?branch=python-3 +.. image:: https://github.com/litl/backoff/workflows/CodeQL/badge.svg + :target: https://github.com/litl/backoff/actions/workflows/codeql-analysis.yml +.. image:: https://img.shields.io/pypi/v/backoff.svg + :target: https://pypi.python.org/pypi/backoff +.. image:: https://img.shields.io/github/license/litl/backoff + :target: https://github.com/litl/backoff/blob/master/LICENSE + +**Function decoration for backoff and retry** + +This module provides function decorators which can be used to wrap a +function such that it will be retried until some condition is met. 
It
+is meant to be of use when accessing unreliable resources with the
+potential for intermittent failures, e.g. network resources and external
+APIs. Somewhat more generally, it may also be of use for dynamically
+polling resources for externally generated content.
+
+Decorators support both regular functions for synchronous code and
+`asyncio `__'s coroutines
+for asynchronous code.
+
+Examples
+========
+
+Since Kenneth Reitz's `requests `_ module
+has become a de facto standard for synchronous HTTP clients in Python,
+networking examples below are written using it, but it is in no way required
+by the backoff module.
+
+@backoff.on_exception
+---------------------
+
+The ``on_exception`` decorator is used to retry when a specified exception
+is raised. Here's an example using exponential backoff when any
+``requests`` exception is raised:
+
+.. code-block:: python
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException)
+    def get_url(url):
+        return requests.get(url)
+
+The decorator will also accept a tuple of exceptions for cases where
+the same backoff behavior is desired for more than one exception type:
+
+.. code-block:: python
+
+    @backoff.on_exception(backoff.expo,
+                          (requests.exceptions.Timeout,
+                           requests.exceptions.ConnectionError))
+    def get_url(url):
+        return requests.get(url)
+
+**Give Up Conditions**
+
+Optional keyword arguments can specify conditions under which to give
+up.
+
+The keyword argument ``max_time`` specifies the maximum amount
+of total time in seconds that can elapse before giving up.
+
+.. code-block:: python
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_time=60)
+    def get_url(url):
+        return requests.get(url)
+
+
+Keyword argument ``max_tries`` specifies the maximum number of calls
+to make to the target function before giving up.
+
+.. code-block:: python
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_tries=8,
+                          jitter=None)
+    def get_url(url):
+        return requests.get(url)
+
+
+In some cases the raised exception instance itself may need to be
+inspected in order to determine if it is a retryable condition. The
+``giveup`` keyword arg can be used to specify a function which accepts
+the exception and returns a truthy value if the exception should not
+be retried:
+
+.. code-block:: python
+
+    def fatal_code(e):
+        return 400 <= e.response.status_code < 500
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_time=300,
+                          giveup=fatal_code)
+    def get_url(url):
+        return requests.get(url)
+
+By default, when a give-up event occurs, the exception in question is reraised
+and so code calling an `on_exception`-decorated function may still
+need to do exception handling. This behavior can optionally be disabled
+using the `raise_on_giveup` keyword argument.
+
+In the code below, `requests.exceptions.RequestException` will not be raised
+when giveup occurs. Note that the decorated function will return `None` in this
+case, regardless of the logic in the `on_exception` handler.
+
+.. code-block:: python
+
+    def fatal_code(e):
+        return 400 <= e.response.status_code < 500
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_time=300,
+                          raise_on_giveup=False,
+                          giveup=fatal_code)
+    def get_url(url):
+        return requests.get(url)
+
+This is useful for non-mission-critical code where you still wish to retry
+the code inside of `backoff.on_exception` but wish to proceed with execution
+even if all retries fail.
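+
+When ``raise_on_giveup`` is disabled, callers should check for the ``None``
+return explicitly. A minimal sketch (``load_from_cache`` is a hypothetical
+fallback helper, not part of backoff):
+
+.. code-block:: python
+
+    result = get_url("https://example.com/data")
+    if result is None:
+        # all retries failed and the exception was swallowed,
+        # so fall back to previously cached data
+        result = load_from_cache()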
+
+@backoff.on_predicate
+---------------------
+
+The ``on_predicate`` decorator is used to retry when a particular
+condition is true of the return value of the target function. This may
+be useful when polling a resource for externally generated content.
+
+Here's an example which uses a Fibonacci sequence backoff when the
+return value of the target function is the empty list:
+
+.. code-block:: python
+
+    @backoff.on_predicate(backoff.fibo, lambda x: x == [], max_value=13)
+    def poll_for_messages(queue):
+        return queue.get()
+
+Extra keyword arguments are passed when initializing the
+wait generator, so the ``max_value`` param above is passed as a keyword
+arg when initializing the fibo generator.
+
+When not specified, the predicate param defaults to the falsey test,
+so the above can more concisely be written:
+
+.. code-block:: python
+
+    @backoff.on_predicate(backoff.fibo, max_value=13)
+    def poll_for_message(queue):
+        return queue.get()
+
+More simply, a function which continues polling every second until it
+gets a non-falsey result could be defined like this:
+
+.. code-block:: python
+
+    @backoff.on_predicate(backoff.constant, jitter=None, interval=1)
+    def poll_for_message(queue):
+        return queue.get()
+
+The jitter is disabled in order to keep the polling frequency fixed.
+
+@backoff.runtime
+----------------
+
+You can also use the ``backoff.runtime`` generator to make use of the
+return value or thrown exception of the decorated method.
+
+For example, to use the value in the ``Retry-After`` header of the response:
+
+.. code-block:: python
+
+    @backoff.on_predicate(
+        backoff.runtime,
+        predicate=lambda r: r.status_code == 429,
+        value=lambda r: int(r.headers.get("Retry-After")),
+        jitter=None,
+    )
+    def get_url():
+        return requests.get(url)
+
+Jitter
+------
+
+A jitter algorithm can be supplied with the ``jitter`` keyword arg to
+either of the backoff decorators. This argument should be a function
+accepting the original unadulterated backoff value and returning its
+jittered counterpart.
+
+As of version 1.2, the default jitter function ``backoff.full_jitter``
+implements the 'Full Jitter' algorithm as defined in the AWS
+Architecture Blog's `Exponential Backoff And Jitter
+`_ post.
+Note that with this algorithm, the time yielded by the wait generator
+is actually the *maximum* amount of time to wait.
+
+Previous versions of backoff defaulted to adding some random number of
+milliseconds (up to 1s) to the raw sleep value. If desired, this
+behavior is now available as ``backoff.random_jitter``.
+
+Using multiple decorators
+-------------------------
+
+The backoff decorators may also be combined to specify different
+backoff behavior for different cases:
+
+.. code-block:: python
+
+    @backoff.on_predicate(backoff.fibo, max_value=13)
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.HTTPError,
+                          max_time=60)
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.Timeout,
+                          max_time=300)
+    def poll_for_message(queue):
+        return queue.get()
+
+
+Runtime Configuration
+---------------------
+
+The decorator functions ``on_exception`` and ``on_predicate`` are
+generally evaluated at import time. This is fine when the keyword args
+are passed as constant values, but suppose we want to consult a
+dictionary with configuration options that only become available at
+runtime. The relevant values are not available at import time. Instead,
+decorator functions can be passed callables which are evaluated at
+runtime to obtain the value:
+
+.. 
code-block:: python + + def lookup_max_time(): + # pretend we have a global reference to 'app' here + # and that it has a dictionary-like 'config' property + return app.config["BACKOFF_MAX_TIME"] + + @backoff.on_exception(backoff.expo, + ValueError, + max_time=lookup_max_time) + +Event handlers +-------------- + +Both backoff decorators optionally accept event handler functions +using the keyword arguments ``on_success``, ``on_backoff``, and ``on_giveup``. +This may be useful in reporting statistics or performing other custom +logging. + +Handlers must be callables with a unary signature accepting a dict +argument. This dict contains the details of the invocation. Valid keys +include: + +* *target*: reference to the function or method being invoked +* *args*: positional arguments to func +* *kwargs*: keyword arguments to func +* *tries*: number of invocation tries so far +* *elapsed*: elapsed time in seconds so far +* *wait*: seconds to wait (``on_backoff`` handler only) +* *value*: value triggering backoff (``on_predicate`` decorator only) + +A handler which prints the details of the backoff event could be +implemented like so: + +.. code-block:: python + + def backoff_hdlr(details): + print ("Backing off {wait:0.1f} seconds after {tries} tries " + "calling function {target} with args {args} and kwargs " + "{kwargs}".format(**details)) + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + on_backoff=backoff_hdlr) + def get_url(url): + return requests.get(url) + +**Multiple handlers per event type** + +In all cases, iterables of handler functions are also accepted, which +are called in turn. For example, you might provide a simple list of +handler functions as the value of the ``on_backoff`` keyword arg: + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + on_backoff=[backoff_hdlr1, backoff_hdlr2]) + def get_url(url): + return requests.get(url) + +**Getting exception info** + +In the case of the ``on_exception`` decorator, all ``on_backoff`` and +``on_giveup`` handlers are called from within the except block for the +exception being handled. Therefore exception info is available to the +handler functions via the python standard library, specifically +``sys.exc_info()`` or the ``traceback`` module. The exception is also +available at the *exception* key in the `details` dict passed to the +handlers. + +Asynchronous code +----------------- + +Backoff supports asynchronous execution in Python 3.5 and above. + +To use backoff in asynchronous code based on +`asyncio `__ +you simply need to apply ``backoff.on_exception`` or ``backoff.on_predicate`` +to coroutines. +You can also use coroutines for the ``on_success``, ``on_backoff``, and +``on_giveup`` event handlers, with the interface otherwise being identical. + +The following examples use `aiohttp `__ +asynchronous HTTP client/server library. + +.. code-block:: python + + @backoff.on_exception(backoff.expo, aiohttp.ClientError, max_time=60) + async def get_url(url): + async with aiohttp.ClientSession(raise_for_status=True) as session: + async with session.get(url) as response: + return await response.text() + +Logging configuration +--------------------- + +By default, backoff and retry attempts are logged to the 'backoff' +logger. By default, this logger is configured with a NullHandler, so +there will be nothing output unless you configure a handler. +Programmatically, this might be accomplished with something as simple +as: + +.. 
code-block:: python + + logging.getLogger('backoff').addHandler(logging.StreamHandler()) + +The default logging level is INFO, which corresponds to logging +anytime a retry event occurs. If you would instead like to log +only when a giveup event occurs, set the logger level to ERROR. + +.. code-block:: python + + logging.getLogger('backoff').setLevel(logging.ERROR) + +It is also possible to specify an alternate logger with the ``logger`` +keyword argument. If a string value is specified the logger will be +looked up by name. + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + logger='my_logger') + # ... + +It is also supported to specify a Logger (or LoggerAdapter) object +directly. + +.. code-block:: python + + my_logger = logging.getLogger('my_logger') + my_handler = logging.StreamHandler() + my_logger.addHandler(my_handler) + my_logger.setLevel(logging.ERROR) + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + logger=my_logger) + # ... + +Default logging can be disabled all together by specifying +``logger=None``. In this case, if desired alternative logging behavior +could be defined by using custom event handlers. + diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..51a2ed6e0cfc55fd74e678721c5526e8d8837bdd --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/RECORD @@ -0,0 +1,16 @@ +backoff-2.2.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +backoff-2.2.1.dist-info/LICENSE,sha256=KmtNX4hNTXob8E6n3xlEzxKzLjWnmobQoHWi0_QPuaw,1077 +backoff-2.2.1.dist-info/METADATA,sha256=Wgffksy-dcDJ4GaoqXyjc8XxrE0DQz3FbWwmrDqo-6U,14827 +backoff-2.2.1.dist-info/RECORD,, +backoff-2.2.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +backoff-2.2.1.dist-info/WHEEL,sha256=gSF7fibx4crkLz_A-IKR6kcuq0jJ64KNCkG8_bcaEao,88 +backoff/__init__.py,sha256=Jl49Ur_5GTiySyaw8URBXlfClWn0H7Pk5P95m1awNZ8,898 +backoff/_async.py,sha256=ZvqmfxxQ2o-UjQUQin12Ojc4eXOXb43RWSQaPaqbALI,6775 +backoff/_common.py,sha256=8s3_5AJH8hiHd9GR2PdKqiaeE2sdEUoyf6cW9OCo1F8,3478 +backoff/_decorator.py,sha256=EuYHrg8rSPaKJ_KeZ99WEg9knrfyn_-ck12Cwfcb68U,9804 +backoff/_jitter.py,sha256=LjJShpjryk9sWBCWiz-3UX1DJCx6rebNJ5Bf3nPMlYQ,782 +backoff/_sync.py,sha256=DT_ktufPPb0nut9WCAKe5UE7sTYRl9f93PRPX6jP8ro,4214 +backoff/_typing.py,sha256=RrJ50kqdeNZvSmoMjNoAUpMCm44V8qreAVgGb-KMl-g,1328 +backoff/_wait_gen.py,sha256=U5AR3Isf4aZs2SC0x9PGI3Wh8JR7XGkV90SiU56wWvw,2396 +backoff/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +backoff/types.py,sha256=4DGG6Ltcz0wVfXrk0YBOnp_oPpcki4c0BOnodRhgoqg,73 diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5430e92e31e9ed6f3bd749246e812f6295b9e168 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff-2.2.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry-core 1.2.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git 
a/py311/lib/python3.11/site-packages/backoff/_sync.py b/py311/lib/python3.11/site-packages/backoff/_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..4371e1680a78ed73dd31f2f30daf79799b27dc44 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/_sync.py @@ -0,0 +1,132 @@ +# coding:utf-8 +import datetime +import functools +import time +from datetime import timedelta + +from backoff._common import (_init_wait_gen, _maybe_call, _next_wait) + + +def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra): + details = { + 'target': target, + 'args': args, + 'kwargs': kwargs, + 'tries': tries, + 'elapsed': elapsed, + } + details.update(extra) + for hdlr in hdlrs: + hdlr(details) + + +def retry_predicate(target, wait_gen, predicate, + *, + max_tries, max_time, jitter, + on_success, on_backoff, on_giveup, + wait_gen_kwargs): + + @functools.wraps(target) + def retry(*args, **kwargs): + max_tries_value = _maybe_call(max_tries) + max_time_value = _maybe_call(max_time) + + tries = 0 + start = datetime.datetime.now() + wait = _init_wait_gen(wait_gen, wait_gen_kwargs) + while True: + tries += 1 + elapsed = timedelta.total_seconds(datetime.datetime.now() - start) + details = { + "target": target, + "args": args, + "kwargs": kwargs, + "tries": tries, + "elapsed": elapsed, + } + + ret = target(*args, **kwargs) + if predicate(ret): + max_tries_exceeded = (tries == max_tries_value) + max_time_exceeded = (max_time_value is not None and + elapsed >= max_time_value) + + if max_tries_exceeded or max_time_exceeded: + _call_handlers(on_giveup, **details, value=ret) + break + + try: + seconds = _next_wait(wait, ret, jitter, elapsed, + max_time_value) + except StopIteration: + _call_handlers(on_giveup, **details) + break + + _call_handlers(on_backoff, **details, + value=ret, wait=seconds) + + time.sleep(seconds) + continue + else: + _call_handlers(on_success, **details, value=ret) + break + + return ret + + return retry + + +def retry_exception(target, wait_gen, exception, + *, + max_tries, max_time, jitter, giveup, + on_success, on_backoff, on_giveup, raise_on_giveup, + wait_gen_kwargs): + + @functools.wraps(target) + def retry(*args, **kwargs): + max_tries_value = _maybe_call(max_tries) + max_time_value = _maybe_call(max_time) + + tries = 0 + start = datetime.datetime.now() + wait = _init_wait_gen(wait_gen, wait_gen_kwargs) + while True: + tries += 1 + elapsed = timedelta.total_seconds(datetime.datetime.now() - start) + details = { + "target": target, + "args": args, + "kwargs": kwargs, + "tries": tries, + "elapsed": elapsed, + } + + try: + ret = target(*args, **kwargs) + except exception as e: + max_tries_exceeded = (tries == max_tries_value) + max_time_exceeded = (max_time_value is not None and + elapsed >= max_time_value) + + if giveup(e) or max_tries_exceeded or max_time_exceeded: + _call_handlers(on_giveup, **details, exception=e) + if raise_on_giveup: + raise + return None + + try: + seconds = _next_wait(wait, e, jitter, elapsed, + max_time_value) + except StopIteration: + _call_handlers(on_giveup, **details, exception=e) + raise e + + _call_handlers(on_backoff, **details, wait=seconds, + exception=e) + + time.sleep(seconds) + else: + _call_handlers(on_success, **details) + + return ret + return retry diff --git a/py311/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..32a68108842c418a008f25712a46e2a8f1fcc496 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/__init__.py b/py311/lib/python3.11/site-packages/black/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..180f5883b1d85710ff73ea3702676f15d9eb9cec --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/__init__.py @@ -0,0 +1,1654 @@ +import io +import json +import platform +import re +import sys +import tokenize +import traceback +from collections.abc import ( + Collection, + Generator, + MutableMapping, + Sequence, +) +from contextlib import nullcontext +from dataclasses import replace +from datetime import datetime, timezone +from enum import Enum +from json.decoder import JSONDecodeError +from pathlib import Path +from re import Pattern +from typing import Any + +import click +from click.core import ParameterSource +from mypy_extensions import mypyc_attr +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +from _black_version import version as __version__ +from black.cache import Cache +from black.comments import normalize_fmt_off +from black.const import ( + DEFAULT_EXCLUDES, + DEFAULT_INCLUDES, + DEFAULT_LINE_LENGTH, + STDIN_PLACEHOLDER, +) +from black.files import ( + best_effort_relative_path, + find_project_root, + find_pyproject_toml, + find_user_pyproject_toml, + gen_python_files, + get_gitignore, + parse_pyproject_toml, + path_is_excluded, + resolves_outside_root_or_cannot_stat, + wrap_stream_for_windows, +) +from black.handle_ipynb_magics import ( + PYTHON_CELL_MAGICS, + jupyter_dependencies_are_installed, + mask_cell, + put_trailing_semicolon_back, + remove_trailing_semicolon, + unmask_cell, + validate_cell, +) +from black.linegen import LN, LineGenerator, transform_line +from black.lines import EmptyLineTracker, LinesBlock +from black.mode import FUTURE_FLAG_TO_FEATURE, VERSION_TO_FEATURES, Feature +from black.mode import Mode as Mode # re-exported +from black.mode import Preview, TargetVersion, supports_feature +from black.nodes import STARS, is_number_token, is_simple_decorator_expression, syms +from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out +from black.parsing import ( # noqa F401 + ASTSafetyError, + InvalidInput, + lib2to3_parse, + parse_ast, + stringify_ast, +) +from black.ranges import ( + adjusted_lines, + convert_unchanged_lines, + parse_line_ranges, + sanitized_lines, +) +from black.report import Changed, NothingChanged, Report +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +COMPILED = Path(__file__).suffix in (".pyd", ".so") + +# types +FileContent = str +Encoding = str +NewLine = str + + +class WriteBack(Enum): + NO = 0 + YES = 1 + DIFF = 2 + CHECK = 3 + COLOR_DIFF = 4 + + @classmethod + def from_configuration( + cls, *, check: bool, diff: bool, color: bool = False + ) -> "WriteBack": + if check and not diff: + return cls.CHECK + + if diff and color: + return cls.COLOR_DIFF + + return cls.DIFF if diff else cls.YES + + +# Legacy name, left for integrations. +FileMode = Mode + + +def read_pyproject_toml( + ctx: click.Context, param: click.Parameter, value: str | None +) -> str | None: + """Inject Black configuration from "pyproject.toml" into defaults in `ctx`. + + Returns the path to a successfully found and read configuration file, None + otherwise. 
+ """ + if not value: + value = find_pyproject_toml( + ctx.params.get("src", ()), ctx.params.get("stdin_filename", None) + ) + if value is None: + return None + + try: + config = parse_pyproject_toml(value) + except (OSError, ValueError) as e: + raise click.FileError( + filename=value, hint=f"Error reading configuration file: {e}" + ) from None + + if not config: + return None + else: + spellcheck_pyproject_toml_keys(ctx, list(config), value) + # Sanitize the values to be Click friendly. For more information please see: + # https://github.com/psf/black/issues/1458 + # https://github.com/pallets/click/issues/1567 + config = { + k: str(v) if not isinstance(v, (list, dict)) else v + for k, v in config.items() + } + + target_version = config.get("target_version") + if target_version is not None and not isinstance(target_version, list): + raise click.BadOptionUsage( + "target-version", "Config key target-version must be a list" + ) + + exclude = config.get("exclude") + if exclude is not None and not isinstance(exclude, str): + raise click.BadOptionUsage("exclude", "Config key exclude must be a string") + + extend_exclude = config.get("extend_exclude") + if extend_exclude is not None and not isinstance(extend_exclude, str): + raise click.BadOptionUsage( + "extend-exclude", "Config key extend-exclude must be a string" + ) + + line_ranges = config.get("line_ranges") + if line_ranges is not None: + raise click.BadOptionUsage( + "line-ranges", "Cannot use line-ranges in the pyproject.toml file." + ) + + default_map: dict[str, Any] = {} + if ctx.default_map: + default_map.update(ctx.default_map) + default_map.update(config) + + ctx.default_map = default_map + return value + + +def spellcheck_pyproject_toml_keys( + ctx: click.Context, config_keys: list[str], config_file_path: str +) -> None: + invalid_keys: list[str] = [] + available_config_options = {param.name for param in ctx.command.params} + invalid_keys = [key for key in config_keys if key not in available_config_options] + if invalid_keys: + keys_str = ", ".join(map(repr, invalid_keys)) + out( + f"Invalid config keys detected: {keys_str} (in {config_file_path})", + fg="red", + ) + + +def target_version_option_callback( + c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...] +) -> list[TargetVersion]: + """Compute the target versions from a --target-version flag. + + This is its own function because mypy couldn't infer the type correctly + when it was a lambda, causing mypyc trouble. + """ + return [TargetVersion[val.upper()] for val in v] + + +def enable_unstable_feature_callback( + c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...] +) -> list[Preview]: + """Compute the features from an --enable-unstable-feature flag.""" + return [Preview[val] for val in v] + + +def re_compile_maybe_verbose(regex: str) -> Pattern[str]: + """Compile a regular expression string in `regex`. + + If it contains newlines, use verbose mode. 
+ """ + if "\n" in regex: + regex = "(?x)" + regex + compiled: Pattern[str] = re.compile(regex) + return compiled + + +def validate_regex( + ctx: click.Context, + param: click.Parameter, + value: str | None, +) -> Pattern[str] | None: + try: + return re_compile_maybe_verbose(value) if value is not None else None + except re.error as e: + raise click.BadParameter(f"Not a valid regular expression: {e}") from None + + +@click.command( + context_settings={"help_option_names": ["-h", "--help"]}, + # While Click does set this field automatically using the docstring, mypyc + # (annoyingly) strips 'em so we need to set it here too. + help="The uncompromising code formatter.", +) +@click.option("-c", "--code", type=str, help="Format the code passed in as a string.") +@click.option( + "-l", + "--line-length", + type=int, + default=DEFAULT_LINE_LENGTH, + help="How many characters per line to allow.", + show_default=True, +) +@click.option( + "-t", + "--target-version", + type=click.Choice([v.name.lower() for v in TargetVersion]), + callback=target_version_option_callback, + multiple=True, + help=( + "Python versions that should be supported by Black's output. You should" + " include all versions that your code supports. By default, Black will infer" + " target versions from the project metadata in pyproject.toml. If this does" + " not yield conclusive results, Black will use per-file auto-detection." + ), +) +@click.option( + "--pyi", + is_flag=True, + help=( + "Format all input files like typing stubs regardless of file extension. This" + " is useful when piping source on standard input." + ), +) +@click.option( + "--ipynb", + is_flag=True, + help=( + "Format all input files like Jupyter Notebooks regardless of file extension." + " This is useful when piping source on standard input." + ), +) +@click.option( + "--python-cell-magics", + multiple=True, + help=( + "When processing Jupyter Notebooks, add the given magic to the list" + f" of known python-magics ({', '.join(sorted(PYTHON_CELL_MAGICS))})." + " Useful for formatting cells with custom python magics." + ), + default=[], +) +@click.option( + "-x", + "--skip-source-first-line", + is_flag=True, + help="Skip the first line of the source code.", +) +@click.option( + "-S", + "--skip-string-normalization", + is_flag=True, + help="Don't normalize string quotes or prefixes.", +) +@click.option( + "-C", + "--skip-magic-trailing-comma", + is_flag=True, + help="Don't use trailing commas as a reason to split lines.", +) +@click.option( + "--preview", + is_flag=True, + help=( + "Enable potentially disruptive style changes that may be added to Black's main" + " functionality in the next major release." + ), +) +@click.option( + "--unstable", + is_flag=True, + help=( + "Enable potentially disruptive style changes that have known bugs or are not" + " currently expected to make it into the stable style Black's next major" + " release. Implies --preview." + ), +) +@click.option( + "--enable-unstable-feature", + type=click.Choice([v.name for v in Preview]), + callback=enable_unstable_feature_callback, + multiple=True, + help=( + "Enable specific features included in the `--unstable` style. Requires" + " `--preview`. No compatibility guarantees are provided on the behavior" + " or existence of any unstable features." + ), +) +@click.option( + "--check", + is_flag=True, + help=( + "Don't write the files back, just return the status. Return code 0 means" + " nothing would change. Return code 1 means some files would be reformatted." 
+ " Return code 123 means there was an internal error." + ), +) +@click.option( + "--diff", + is_flag=True, + help=( + "Don't write the files back, just output a diff to indicate what changes" + " Black would've made. They are printed to stdout so capturing them is simple." + ), +) +@click.option( + "--color/--no-color", + is_flag=True, + help="Show (or do not show) colored diff. Only applies when --diff is given.", +) +@click.option( + "--line-ranges", + multiple=True, + metavar="START-END", + help=( + "When specified, Black will try its best to only format these lines. This" + " option can be specified multiple times, and a union of the lines will be" + " formatted. Each range must be specified as two integers connected by a `-`:" + " `-`. The `` and `` integer indices are 1-based and" + " inclusive on both ends." + ), + default=(), +) +@click.option( + "--fast/--safe", + is_flag=True, + help=( + "By default, Black performs an AST safety check after formatting your code." + " The --fast flag turns off this check and the --safe flag explicitly enables" + " it. [default: --safe]" + ), +) +@click.option( + "--required-version", + type=str, + help=( + "Require a specific version of Black to be running. This is useful for" + " ensuring that all contributors to your project are using the same" + " version, because different versions of Black may format code a little" + " differently. This option can be set in a configuration file for consistent" + " results across environments." + ), +) +@click.option( + "--exclude", + type=str, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " excluded on recursive searches. An empty value means no paths are excluded." + " Use forward slashes for directories on all platforms (Windows, too)." + " By default, Black also ignores all paths listed in .gitignore. Changing this" + f" value will override all default exclusions. [default: {DEFAULT_EXCLUDES}]" + ), + show_default=False, +) +@click.option( + "--extend-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but adds additional files and directories on top of the" + " default values instead of overriding them." + ), +) +@click.option( + "--force-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but files and directories matching this regex will be excluded" + " even when they are passed explicitly as arguments. This is useful when" + " invoking Black programmatically on changed files, such as in a pre-commit" + " hook or editor plugin." + ), +) +@click.option( + "--stdin-filename", + type=str, + is_eager=True, + help=( + "The name of the file when passing it through stdin. Useful to make sure Black" + " will respect the --force-exclude option on some editors that rely on using" + " stdin." + ), +) +@click.option( + "--include", + type=str, + default=DEFAULT_INCLUDES, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " included on recursive searches. An empty value means all files are included" + " regardless of the name. Use forward slashes for directories on all platforms" + " (Windows, too). Overrides all exclusions, including from .gitignore and" + " command line options." + ), + show_default=True, +) +@click.option( + "-W", + "--workers", + type=click.IntRange(min=1), + default=None, + help=( + "When Black formats multiple files, it may use a process pool to speed up" + " formatting. 
This option controls the number of parallel workers. This can" + " also be specified via the BLACK_NUM_WORKERS environment variable. Defaults" + " to the number of CPUs in the system." + ), +) +@click.option( + "-q", + "--quiet", + is_flag=True, + help=( + "Stop emitting all non-critical output. Error messages will still be emitted" + " (which can silenced by 2>/dev/null)." + ), +) +@click.option( + "-v", + "--verbose", + is_flag=True, + help=( + "Emit messages about files that were not changed or were ignored due to" + " exclusion patterns. If Black is using a configuration file, a message" + " detailing which one it is using will be emitted." + ), +) +@click.version_option( + version=__version__, + message=( + f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n" + f"Python ({platform.python_implementation()}) {platform.python_version()}" + ), +) +@click.argument( + "src", + nargs=-1, + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True + ), + is_eager=True, + metavar="SRC ...", +) +@click.option( + "--config", + type=click.Path( + exists=True, + file_okay=True, + dir_okay=False, + readable=True, + allow_dash=False, + path_type=str, + ), + is_eager=True, + callback=read_pyproject_toml, + help="Read configuration options from a configuration file.", +) +@click.option( + "--no-cache", + is_flag=True, + help=( + "Skip reading and writing the cache, forcing Black to reformat all" + " included files." + ), +) +@click.pass_context +def main( + ctx: click.Context, + code: str | None, + line_length: int, + target_version: list[TargetVersion], + check: bool, + diff: bool, + line_ranges: Sequence[str], + color: bool, + fast: bool, + pyi: bool, + ipynb: bool, + python_cell_magics: Sequence[str], + skip_source_first_line: bool, + skip_string_normalization: bool, + skip_magic_trailing_comma: bool, + preview: bool, + unstable: bool, + enable_unstable_feature: list[Preview], + quiet: bool, + verbose: bool, + required_version: str | None, + include: Pattern[str], + exclude: Pattern[str] | None, + extend_exclude: Pattern[str] | None, + force_exclude: Pattern[str] | None, + stdin_filename: str | None, + workers: int | None, + src: tuple[str, ...], + config: str | None, + no_cache: bool, +) -> None: + """The uncompromising code formatter.""" + ctx.ensure_object(dict) + + assert sys.version_info >= (3, 10), "Black requires Python 3.10+" + if sys.version_info[:3] == (3, 12, 5): + out( + "Python 3.12.5 has a memory safety issue that can cause Black's " + "AST safety checks to fail. " + "Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4" + ) + ctx.exit(1) + + if src and code is not None: + out( + main.get_usage(ctx) + + "\n\n'SRC' and 'code' cannot be passed simultaneously." + ) + ctx.exit(1) + if not src and code is None: + out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.") + ctx.exit(1) + + # It doesn't do anything if --unstable is also passed, so just allow it. + if enable_unstable_feature and not (preview or unstable): + out( + main.get_usage(ctx) + + "\n\n'--enable-unstable-feature' requires '--preview'." 
+ ) + ctx.exit(1) + + root, method = ( + find_project_root(src, stdin_filename) if code is None else (None, None) + ) + ctx.obj["root"] = root + + if verbose: + if root: + out( + f"Identified `{root}` as project root containing a {method}.", + fg="blue", + ) + + if config: + config_source = ctx.get_parameter_source("config") + user_level_config = str(find_user_pyproject_toml()) + if config == user_level_config: + out( + "Using configuration from user-level config at " + f"'{user_level_config}'.", + fg="blue", + ) + elif config_source in ( + ParameterSource.DEFAULT, + ParameterSource.DEFAULT_MAP, + ): + out("Using configuration from project root.", fg="blue") + else: + out(f"Using configuration in '{config}'.", fg="blue") + if ctx.default_map: + for param, value in ctx.default_map.items(): + out(f"{param}: {value}") + + error_msg = "Oh no! 💥 💔 💥" + if ( + required_version + and required_version != __version__ + and required_version != __version__.split(".")[0] + ): + err( + f"{error_msg} The required version `{required_version}` does not match" + f" the running version `{__version__}`!" + ) + ctx.exit(1) + if ipynb and pyi: + err("Cannot pass both `pyi` and `ipynb` flags!") + ctx.exit(1) + + write_back = WriteBack.from_configuration(check=check, diff=diff, color=color) + if target_version: + versions = set(target_version) + else: + # We'll autodetect later. + versions = set() + mode = Mode( + target_versions=versions, + line_length=line_length, + is_pyi=pyi, + is_ipynb=ipynb, + skip_source_first_line=skip_source_first_line, + string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + preview=preview, + unstable=unstable, + python_cell_magics=set(python_cell_magics), + enabled_features=set(enable_unstable_feature), + ) + + lines: list[tuple[int, int]] = [] + if line_ranges: + if ipynb: + err("Cannot use --line-ranges with ipynb files.") + ctx.exit(1) + + try: + lines = parse_line_ranges(line_ranges) + except ValueError as e: + err(str(e)) + ctx.exit(1) + + if code is not None: + # Run in quiet mode by default with -c; the extra output isn't useful. + # You can still pass -v to get verbose output. + quiet = True + + report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) + + if code is not None: + reformat_code( + content=code, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + lines=lines, + ) + else: + assert root is not None # root is only None if code is not None + try: + sources = get_sources( + root=root, + src=src, + quiet=quiet, + verbose=verbose, + include=include, + exclude=exclude, + extend_exclude=extend_exclude, + force_exclude=force_exclude, + report=report, + stdin_filename=stdin_filename, + ) + except GitWildMatchPatternError: + ctx.exit(1) + + if not sources: + if verbose or not quiet: + out("No Python files are present to be formatted. 
Nothing to do 😴") + if "-" in src: + sys.stdout.write(sys.stdin.read()) + ctx.exit(0) + + if len(sources) == 1: + reformat_one( + src=sources.pop(), + fast=fast, + write_back=write_back, + mode=mode, + report=report, + lines=lines, + no_cache=no_cache, + ) + else: + from black.concurrency import reformat_many + + if lines: + err("Cannot use --line-ranges to format multiple files.") + ctx.exit(1) + reformat_many( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + workers=workers, + no_cache=no_cache, + ) + + if verbose or not quiet: + if code is None and (verbose or report.change_count or report.failure_count): + out() + out(error_msg if report.return_code else "All done! ✨ 🍰 ✨") + if code is None: + click.echo(str(report), err=True) + ctx.exit(report.return_code) + + +def get_sources( + *, + root: Path, + src: tuple[str, ...], + quiet: bool, + verbose: bool, + include: Pattern[str], + exclude: Pattern[str] | None, + extend_exclude: Pattern[str] | None, + force_exclude: Pattern[str] | None, + report: "Report", + stdin_filename: str | None, +) -> set[Path]: + """Compute the set of files to be formatted.""" + sources: set[Path] = set() + + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + using_default_exclude = exclude is None + exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude + gitignore: dict[Path, PathSpec] | None = None + root_gitignore = get_gitignore(root) + + for s in src: + if s == "-" and stdin_filename: + path = Path(stdin_filename) + if path_is_excluded(stdin_filename, force_exclude): + report.path_ignored( + path, + "--stdin-filename matches the --force-exclude regular expression", + ) + continue + is_stdin = True + else: + path = Path(s) + is_stdin = False + + # Compare the logic here to the logic in `gen_python_files`. + if is_stdin or path.is_file(): + if resolves_outside_root_or_cannot_stat(path, root, report): + if verbose: + out(f'Skipping invalid source: "{path}"', fg="red") + continue + + root_relative_path = best_effort_relative_path(path, root).as_posix() + root_relative_path = "/" + root_relative_path + + # Hard-exclude any files that matches the `--force-exclude` regex. + if path_is_excluded(root_relative_path, force_exclude): + report.path_ignored( + path, "matches the --force-exclude regular expression" + ) + continue + + if is_stdin: + path = Path(f"{STDIN_PLACEHOLDER}{path}") + + if path.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + warn=verbose or not quiet + ): + continue + + if verbose: + out(f'Found input source: "{path}"', fg="blue") + sources.add(path) + elif path.is_dir(): + path = root / (path.resolve().relative_to(root)) + if verbose: + out(f'Found input source directory: "{path}"', fg="blue") + + if using_default_exclude: + gitignore = { + root: root_gitignore, + path: get_gitignore(path), + } + sources.update( + gen_python_files( + path.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore, + verbose=verbose, + quiet=quiet, + ) + ) + elif s == "-": + if verbose: + out("Found input source stdin", fg="blue") + sources.add(path) + else: + err(f"invalid path: {s}") + + return sources + + +def reformat_code( + content: str, + fast: bool, + write_back: WriteBack, + mode: Mode, + report: Report, + *, + lines: Collection[tuple[int, int]] = (), +) -> None: + """ + Reformat and print out `content` without spawning child processes. + Similar to `reformat_one`, but for string content. 
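+
+    A rough usage sketch (the argument values are illustrative only)::
+
+        reformat_code(
+            content="x=1", fast=False, write_back=WriteBack.YES,
+            mode=Mode(), report=Report(),
+        )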
+ + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + path = Path("") + try: + changed = Changed.NO + if format_stdin_to_stdout( + content=content, fast=fast, write_back=write_back, mode=mode, lines=lines + ): + changed = Changed.YES + report.done(path, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(path, str(exc)) + + +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 +@mypyc_attr(patchable=True) +def reformat_one( + src: Path, + fast: bool, + write_back: WriteBack, + mode: Mode, + report: "Report", + *, + lines: Collection[tuple[int, int]] = (), + no_cache: bool = False, +) -> None: + """Reformat a single file under `src` without spawning child processes. + + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + try: + changed = Changed.NO + + if str(src) == "-": + is_stdin = True + elif str(src).startswith(STDIN_PLACEHOLDER): + is_stdin = True + # Use the original name again in case we want to print something + # to the user + src = Path(str(src)[len(STDIN_PLACEHOLDER) :]) + else: + is_stdin = False + + if is_stdin: + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + if format_stdin_to_stdout( + fast=fast, write_back=write_back, mode=mode, lines=lines + ): + changed = Changed.YES + else: + cache = None if no_cache else Cache.read(mode) + if cache is not None and write_back not in ( + WriteBack.DIFF, + WriteBack.COLOR_DIFF, + ): + if not cache.is_changed(src): + changed = Changed.CACHED + if changed is not Changed.CACHED and format_file_in_place( + src, fast=fast, write_back=write_back, mode=mode, lines=lines + ): + changed = Changed.YES + if cache is not None and ( + (write_back is WriteBack.YES and changed is not Changed.CACHED) + or (write_back is WriteBack.CHECK and changed is Changed.NO) + ): + cache.write([src]) + report.done(src, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(src, str(exc)) + + +def format_file_in_place( + src: Path, + fast: bool, + mode: Mode, + write_back: WriteBack = WriteBack.NO, + lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy + *, + lines: Collection[tuple[int, int]] = (), +) -> bool: + """Format file under `src` path. Return True if changed. + + If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted + code to the file. + `mode` and `fast` options are passed to :func:`format_file_contents`. + """ + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + + then = datetime.fromtimestamp(src.stat().st_mtime, timezone.utc) + header = b"" + with open(src, "rb") as buf: + if mode.skip_source_first_line: + header = buf.readline() + src_contents, encoding, newline = decode_bytes(buf.read(), mode) + try: + dst_contents = format_file_contents( + src_contents, fast=fast, mode=mode, lines=lines + ) + except NothingChanged: + return False + except JSONDecodeError: + raise ValueError( + f"File '{src}' cannot be parsed as valid Jupyter notebook." 
+ ) from None + src_contents = header.decode(encoding) + src_contents + dst_contents = header.decode(encoding) + dst_contents + + if write_back == WriteBack.YES: + with open(src, "w", encoding=encoding, newline=newline) as f: + f.write(dst_contents) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.now(timezone.utc) + src_name = f"{src}\t{then}" + dst_name = f"{src}\t{now}" + if mode.is_ipynb: + diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) + else: + diff_contents = diff(src_contents, dst_contents, src_name, dst_name) + + if write_back == WriteBack.COLOR_DIFF: + diff_contents = color_diff(diff_contents) + + with lock or nullcontext(): + f = io.TextIOWrapper( + sys.stdout.buffer, + encoding=encoding, + newline=newline, + write_through=True, + ) + f = wrap_stream_for_windows(f) + f.write(diff_contents) + f.detach() + + return True + + +def format_stdin_to_stdout( + fast: bool, + *, + content: str | None = None, + write_back: WriteBack = WriteBack.NO, + mode: Mode, + lines: Collection[tuple[int, int]] = (), +) -> bool: + """Format file on stdin. Return True if changed. + + If content is None, it's read from sys.stdin. + + If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, + write a diff to stdout. The `mode` argument is passed to + :func:`format_file_contents`. + """ + then = datetime.now(timezone.utc) + + if content is None: + src, encoding, newline = decode_bytes(sys.stdin.buffer.read(), mode) + elif Preview.normalize_cr_newlines in mode: + src, encoding, newline = content, "utf-8", "\n" + else: + src, encoding, newline = content, "utf-8", "" + + dst = src + try: + dst = format_file_contents(src, fast=fast, mode=mode, lines=lines) + return True + + except NothingChanged: + return False + + finally: + f = io.TextIOWrapper( + sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True + ) + if write_back == WriteBack.YES: + # Make sure there's a newline after the content + if Preview.normalize_cr_newlines in mode: + if dst and dst[-1] != "\n" and dst[-1] != "\r": + dst += newline + else: + if dst and dst[-1] != "\n": + dst += "\n" + f.write(dst) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.now(timezone.utc) + src_name = f"STDIN\t{then}" + dst_name = f"STDOUT\t{now}" + d = diff(src, dst, src_name, dst_name) + if write_back == WriteBack.COLOR_DIFF: + d = color_diff(d) + f = wrap_stream_for_windows(f) + f.write(d) + f.detach() + + +def check_stability_and_equivalence( + src_contents: str, + dst_contents: str, + *, + mode: Mode, + lines: Collection[tuple[int, int]] = (), +) -> None: + """Perform stability and equivalence checks. + + Raise AssertionError if source and destination contents are not + equivalent, or if a second pass of the formatter would format the + content differently. + """ + assert_equivalent(src_contents, dst_contents) + assert_stable(src_contents, dst_contents, mode=mode, lines=lines) + + +def format_file_contents( + src_contents: str, + *, + fast: bool, + mode: Mode, + lines: Collection[tuple[int, int]] = (), +) -> FileContent: + """Reformat contents of a file and return new contents. + + If `fast` is False, additionally confirm that the reformatted code is + valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it. + `mode` is passed to :func:`format_str`. 
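+
+    A minimal sketch of typical use (illustrative only)::
+
+        try:
+            dst = format_file_contents(src, fast=False, mode=Mode())
+        except NothingChanged:
+            dst = src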
+ """ + if mode.is_ipynb: + dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) + else: + dst_contents = format_str(src_contents, mode=mode, lines=lines) + if src_contents == dst_contents: + raise NothingChanged + + if not fast and not mode.is_ipynb: + # Jupyter notebooks will already have been checked above. + check_stability_and_equivalence( + src_contents, dst_contents, mode=mode, lines=lines + ) + return dst_contents + + +def format_cell(src: str, *, fast: bool, mode: Mode) -> str: + """Format code in given cell of Jupyter notebook. + + General idea is: + + - if cell has trailing semicolon, remove it; + - if cell has IPython magics, mask them; + - format cell; + - reinstate IPython magics; + - reinstate trailing semicolon (if originally present); + - strip trailing newlines. + + Cells with syntax errors will not be processed, as they + could potentially be automagics or multi-line magics, which + are currently not supported. + """ + validate_cell(src, mode) + src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon( + src + ) + try: + masked_src, replacements = mask_cell(src_without_trailing_semicolon) + except SyntaxError: + raise NothingChanged from None + masked_dst = format_str(masked_src, mode=mode) + if not fast: + check_stability_and_equivalence(masked_src, masked_dst, mode=mode) + dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements) + dst = put_trailing_semicolon_back( + dst_without_trailing_semicolon, has_trailing_semicolon + ) + dst = dst.rstrip("\n") + if dst == src: + raise NothingChanged from None + return dst + + +def validate_metadata(nb: MutableMapping[str, Any]) -> None: + """If notebook is marked as non-Python, don't format it. + + All notebook metadata fields are optional, see + https://nbformat.readthedocs.io/en/latest/format_description.html. So + if a notebook has empty metadata, we will try to parse it anyway. + """ + language = nb.get("metadata", {}).get("language_info", {}).get("name", None) + if language is not None and language != "python": + raise NothingChanged from None + + +def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Format Jupyter notebook. + + Operate cell-by-cell, only on code cells, only for Python notebooks. + If the ``.ipynb`` originally had a trailing newline, it'll be preserved. + """ + if not src_contents: + raise NothingChanged + + trailing_newline = src_contents[-1] == "\n" + modified = False + nb = json.loads(src_contents) + validate_metadata(nb) + for cell in nb["cells"]: + if cell.get("cell_type", None) == "code": + try: + src = "".join(cell["source"]) + dst = format_cell(src, fast=fast, mode=mode) + except NothingChanged: + pass + else: + cell["source"] = dst.splitlines(keepends=True) + modified = True + if modified: + dst_contents = json.dumps(nb, indent=1, ensure_ascii=False) + if trailing_newline: + dst_contents = dst_contents + "\n" + return dst_contents + else: + raise NothingChanged + + +def format_str( + src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = () +) -> str: + """Reformat a string and return new contents. + + `mode` determines formatting options, such as how many characters per line are + allowed. Example: + + >>> import black + >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode())) + def f(arg: str = "") -> None: + ... + + A more complex example: + + >>> print( + ... black.format_str( + ... "def f(arg:str='')->None: hey", + ... mode=black.Mode( + ... 
target_versions={black.TargetVersion.PY36}, + ... line_length=10, + ... string_normalization=False, + ... is_pyi=False, + ... ), + ... ), + ... ) + def f( + arg: str = '', + ) -> None: + hey + + """ + if lines: + lines = sanitized_lines(lines, src_contents) + if not lines: + return src_contents # Nothing to format + dst_contents = _format_str_once(src_contents, mode=mode, lines=lines) + # Forced second pass to work around optional trailing commas (becoming + # forced trailing commas on pass 2) interacting differently with optional + # parentheses. Admittedly ugly. + if src_contents != dst_contents: + if lines: + lines = adjusted_lines(lines, src_contents, dst_contents) + return _format_str_once(dst_contents, mode=mode, lines=lines) + return dst_contents + + +def _format_str_once( + src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = () +) -> str: + if Preview.normalize_cr_newlines in mode: + normalized_contents, _, newline_type = decode_bytes( + src_contents.encode("utf-8"), mode + ) + + src_node = lib2to3_parse( + normalized_contents.lstrip(), target_versions=mode.target_versions + ) + else: + src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) + + dst_blocks: list[LinesBlock] = [] + if mode.target_versions: + versions = mode.target_versions + else: + future_imports = get_future_imports(src_node) + versions = detect_target_versions(src_node, future_imports=future_imports) + + line_generation_features = { + feature + for feature in { + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.UNPARENTHESIZED_EXCEPT_TYPES, + Feature.T_STRINGS, + } + if supports_feature(versions, feature) + } + normalize_fmt_off(src_node, mode, lines) + if lines: + # This should be called after normalize_fmt_off. + convert_unchanged_lines(src_node, lines) + + line_generator = LineGenerator(mode=mode, features=line_generation_features) + elt = EmptyLineTracker(mode=mode) + split_line_features = { + feature + for feature in { + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + } + if supports_feature(versions, feature) + } + block: LinesBlock | None = None + for current_line in line_generator.visit(src_node): + block = elt.maybe_empty_lines(current_line) + dst_blocks.append(block) + for line in transform_line( + current_line, mode=mode, features=split_line_features + ): + block.content_lines.append(str(line)) + if dst_blocks: + dst_blocks[-1].after = 0 + dst_contents = [] + for block in dst_blocks: + dst_contents.extend(block.all_lines()) + if not dst_contents: + if Preview.normalize_cr_newlines in mode: + if "\n" in normalized_contents: + return newline_type + else: + # Use decode_bytes to retrieve the correct source newline (CRLF or LF), + # and check if normalized_content has more than one line + normalized_content, _, newline = decode_bytes( + src_contents.encode("utf-8"), mode + ) + if "\n" in normalized_content: + return newline + return "" + if Preview.normalize_cr_newlines in mode: + return "".join(dst_contents).replace("\n", newline_type) + else: + return "".join(dst_contents) + + +def decode_bytes(src: bytes, mode: Mode) -> tuple[FileContent, Encoding, NewLine]: + """Return a tuple of (decoded_contents, encoding, newline). + + `newline` is either CRLF or LF but `decoded_contents` is decoded with + universal newlines (i.e. only contains LF). 
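+
+    A sketch of how callers typically feed it (``path`` is hypothetical)::
+
+        with open(path, "rb") as buf:
+            contents, encoding, newline = decode_bytes(buf.read(), mode)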
+ """ + srcbuf = io.BytesIO(src) + encoding, lines = tokenize.detect_encoding(srcbuf.readline) + if not lines: + return "", encoding, "\n" + + if Preview.normalize_cr_newlines in mode: + if lines[0][-2:] == b"\r\n": + if b"\r" in lines[0][:-2]: + newline = "\r" + else: + newline = "\r\n" + elif lines[0][-1:] == b"\n": + if b"\r" in lines[0][:-1]: + newline = "\r" + else: + newline = "\n" + else: + if b"\r" in lines[0]: + newline = "\r" + else: + newline = "\n" + else: + newline = "\r\n" if lines[0][-2:] == b"\r\n" else "\n" + + srcbuf.seek(0) + with io.TextIOWrapper(srcbuf, encoding) as tiow: + return tiow.read(), encoding, newline + + +def get_features_used( + node: Node, *, future_imports: set[str] | None = None +) -> set[Feature]: + """Return a set of (relatively) new Python features used in this file. + + Currently looking for: + - f-strings; + - self-documenting expressions in f-strings (f"{x=}"); + - underscores in numeric literals; + - trailing commas after * or ** in function signatures and calls; + - positional only arguments in function signatures and lambdas; + - assignment expression; + - relaxed decorator syntax; + - usage of __future__ flags (annotations); + - print / exec statements; + - parenthesized context managers; + - match statements; + - except* clause; + - variadic generics; + """ + features: set[Feature] = set() + if future_imports: + features |= { + FUTURE_FLAG_TO_FEATURE[future_import] + for future_import in future_imports + if future_import in FUTURE_FLAG_TO_FEATURE + } + + for n in node.pre_order(): + if n.type == token.FSTRING_START: + features.add(Feature.F_STRINGS) + elif n.type == token.TSTRING_START: + features.add(Feature.T_STRINGS) + elif ( + n.type == token.RBRACE + and n.parent is not None + and any(child.type == token.EQUAL for child in n.parent.children) + ): + features.add(Feature.DEBUG_F_STRINGS) + + elif is_number_token(n): + if "_" in n.value: + features.add(Feature.NUMERIC_UNDERSCORES) + + elif n.type == token.SLASH: + if n.parent and n.parent.type in { + syms.typedargslist, + syms.arglist, + syms.varargslist, + }: + features.add(Feature.POS_ONLY_ARGUMENTS) + + elif n.type == token.COLONEQUAL: + features.add(Feature.ASSIGNMENT_EXPRESSIONS) + + elif n.type == syms.decorator: + if len(n.children) > 1 and not is_simple_decorator_expression( + n.children[1] + ): + features.add(Feature.RELAXED_DECORATORS) + + elif ( + n.type in {syms.typedargslist, syms.arglist} + and n.children + and n.children[-1].type == token.COMMA + ): + if n.type == syms.typedargslist: + feature = Feature.TRAILING_COMMA_IN_DEF + else: + feature = Feature.TRAILING_COMMA_IN_CALL + + for ch in n.children: + if ch.type in STARS: + features.add(feature) + + if ch.type == syms.argument: + for argch in ch.children: + if argch.type in STARS: + features.add(feature) + + elif ( + n.type in {syms.return_stmt, syms.yield_expr} + and len(n.children) >= 2 + and n.children[1].type == syms.testlist_star_expr + and any(child.type == syms.star_expr for child in n.children[1].children) + ): + features.add(Feature.UNPACKING_ON_FLOW) + + elif ( + n.type == syms.annassign + and len(n.children) >= 4 + and n.children[3].type == syms.testlist_star_expr + ): + features.add(Feature.ANN_ASSIGN_EXTENDED_RHS) + + elif ( + n.type == syms.with_stmt + and len(n.children) > 2 + and n.children[1].type == syms.atom + ): + atom_children = n.children[1].children + if ( + len(atom_children) == 3 + and atom_children[0].type == token.LPAR + and _contains_asexpr(atom_children[1]) + and atom_children[2].type == 
token.RPAR + ): + features.add(Feature.PARENTHESIZED_CONTEXT_MANAGERS) + + elif n.type == syms.match_stmt: + features.add(Feature.PATTERN_MATCHING) + + elif n.type in {syms.subscriptlist, syms.trailer} and any( + child.type == syms.star_expr for child in n.children + ): + features.add(Feature.VARIADIC_GENERICS) + + elif ( + n.type == syms.tname_star + and len(n.children) == 3 + and n.children[2].type == syms.star_expr + ): + features.add(Feature.VARIADIC_GENERICS) + + elif n.type in (syms.type_stmt, syms.typeparams): + features.add(Feature.TYPE_PARAMS) + + elif ( + n.type in (syms.typevartuple, syms.paramspec, syms.typevar) + and n.children[-2].type == token.EQUAL + ): + features.add(Feature.TYPE_PARAM_DEFAULTS) + + elif ( + n.type == syms.except_clause + and len(n.children) >= 2 + and ( + n.children[1].type == token.STAR or n.children[1].type == syms.testlist + ) + ): + is_star_except = n.children[1].type == token.STAR + + if is_star_except: + features.add(Feature.EXCEPT_STAR) + + # Presence of except* pushes as clause 1 index back + has_as_clause = ( + len(n.children) >= is_star_except + 3 + and n.children[is_star_except + 2].type == token.NAME + and n.children[is_star_except + 2].value == "as" # type: ignore + ) + + # If there's no 'as' clause and the except expression is a testlist. + if not has_as_clause and ( + (is_star_except and n.children[2].type == syms.testlist) + or (not is_star_except and n.children[1].type == syms.testlist) + ): + features.add(Feature.UNPARENTHESIZED_EXCEPT_TYPES) + + return features + + +def _contains_asexpr(node: Node | Leaf) -> bool: + """Return True if `node` contains an as-pattern.""" + if node.type == syms.asexpr_test: + return True + elif node.type == syms.atom: + if ( + len(node.children) == 3 + and node.children[0].type == token.LPAR + and node.children[2].type == token.RPAR + ): + return _contains_asexpr(node.children[1]) + elif node.type == syms.testlist_gexp: + return any(_contains_asexpr(child) for child in node.children) + return False + + +def detect_target_versions( + node: Node, *, future_imports: set[str] | None = None +) -> set[TargetVersion]: + """Detect the version to target based on the nodes used.""" + features = get_features_used(node, future_imports=future_imports) + return { + version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] + } + + +def get_future_imports(node: Node) -> set[str]: + """Return a set of __future__ imports in the file.""" + imports: set[str] = set() + + def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]: + for child in children: + if isinstance(child, Leaf): + if child.type == token.NAME: + yield child.value + + elif child.type == syms.import_as_name: + orig_name = child.children[0] + assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports" + assert orig_name.type == token.NAME, "Invalid syntax parsing imports" + yield orig_name.value + + elif child.type == syms.import_as_names: + yield from get_imports_from_children(child.children) + + else: + raise AssertionError("Invalid syntax parsing imports") + + for child in node.children: + if child.type != syms.simple_stmt: + break + + first_child = child.children[0] + if isinstance(first_child, Leaf): + # Continue looking if we see a docstring; otherwise stop. 
+ if ( + len(child.children) == 2 + and first_child.type == token.STRING + and child.children[1].type == token.NEWLINE + ): + continue + + break + + elif first_child.type == syms.import_from: + module_name = first_child.children[1] + if not isinstance(module_name, Leaf) or module_name.value != "__future__": + break + + imports |= set(get_imports_from_children(first_child.children[3:])) + else: + break + + return imports + + +def _black_info() -> str: + return ( + f"Black {__version__} on " + f"Python ({platform.python_implementation()}) {platform.python_version()}" + ) + + +def assert_equivalent(src: str, dst: str) -> None: + """Raise AssertionError if `src` and `dst` aren't equivalent.""" + try: + src_ast = parse_ast(src) + except Exception as exc: + raise ASTSafetyError( + "cannot use --safe with this file; failed to parse source file AST: " + f"{exc}\n" + "This could be caused by running Black with an older Python version " + "that does not support new syntax used in your source file." + ) from exc + + try: + dst_ast = parse_ast(dst) + except Exception as exc: + log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) + raise ASTSafetyError( + f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. " + "Please report a bug on https://github.com/psf/black/issues. " + f"This invalid output might be helpful: {log}" + ) from None + + src_ast_str = "\n".join(stringify_ast(src_ast)) + dst_ast_str = "\n".join(stringify_ast(dst_ast)) + if src_ast_str != dst_ast_str: + log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) + raise ASTSafetyError( + f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to" + " the source. Please report a bug on https://github.com/psf/black/issues." + f" This diff might be helpful: {log}" + ) from None + + +def assert_stable( + src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = () +) -> None: + """Raise AssertionError if `dst` reformats differently the second time.""" + if lines: + # Formatting specified lines requires `adjusted_lines` to map original lines + # to the formatted lines before re-formatting the previously formatted result. + # Due to less-ideal diff algorithm, some edge cases produce incorrect new line + # ranges. Hence for now, we skip the stable check. + # See https://github.com/psf/black/issues/4033 for context. + return + # We shouldn't call format_str() here, because that formats the string + # twice and may hide a bug where we bounce back and forth between two + # versions. + newdst = _format_str_once(dst, mode=mode, lines=lines) + if dst != newdst: + log = dump_to_file( + str(mode), + diff(src, dst, "source", "first pass"), + diff(dst, newdst, "first pass", "second pass"), + ) + raise AssertionError( + f"INTERNAL ERROR: {_black_info()} produced different code on the second" + " pass of the formatter. Please report a bug on" + f" https://github.com/psf/black/issues. This diff might be helpful: {log}" + ) from None + + +def patched_main() -> None: + # PyInstaller patches multiprocessing to need freeze_support() even in non-Windows + # environments so just assume we always need to call it if frozen. 
+ if getattr(sys, "frozen", False): + from multiprocessing import freeze_support + + freeze_support() + + main() + + +if __name__ == "__main__": + patched_main() diff --git a/py311/lib/python3.11/site-packages/black/__main__.py b/py311/lib/python3.11/site-packages/black/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..19b810b530acb20f7cf790f2b902f717ef1d8c03 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/__main__.py @@ -0,0 +1,3 @@ +from black import patched_main + +patched_main() diff --git a/py311/lib/python3.11/site-packages/black/_width_table.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/_width_table.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f784cf097b67567345b38ea4fd79d329548e4312 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/_width_table.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/_width_table.py b/py311/lib/python3.11/site-packages/black/_width_table.py new file mode 100644 index 0000000000000000000000000000000000000000..5f6ff9febc33d69b27746b5574899116c28c82da --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/_width_table.py @@ -0,0 +1,478 @@ +# Generated by make_width_table.py +# wcwidth 0.2.6 +# Unicode 15.0.0 +from typing import Final + +WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [ + (0, 0, 0), + (1, 31, -1), + (127, 159, -1), + (768, 879, 0), + (1155, 1161, 0), + (1425, 1469, 0), + (1471, 1471, 0), + (1473, 1474, 0), + (1476, 1477, 0), + (1479, 1479, 0), + (1552, 1562, 0), + (1611, 1631, 0), + (1648, 1648, 0), + (1750, 1756, 0), + (1759, 1764, 0), + (1767, 1768, 0), + (1770, 1773, 0), + (1809, 1809, 0), + (1840, 1866, 0), + (1958, 1968, 0), + (2027, 2035, 0), + (2045, 2045, 0), + (2070, 2073, 0), + (2075, 2083, 0), + (2085, 2087, 0), + (2089, 2093, 0), + (2137, 2139, 0), + (2200, 2207, 0), + (2250, 2273, 0), + (2275, 2306, 0), + (2362, 2362, 0), + (2364, 2364, 0), + (2369, 2376, 0), + (2381, 2381, 0), + (2385, 2391, 0), + (2402, 2403, 0), + (2433, 2433, 0), + (2492, 2492, 0), + (2497, 2500, 0), + (2509, 2509, 0), + (2530, 2531, 0), + (2558, 2558, 0), + (2561, 2562, 0), + (2620, 2620, 0), + (2625, 2626, 0), + (2631, 2632, 0), + (2635, 2637, 0), + (2641, 2641, 0), + (2672, 2673, 0), + (2677, 2677, 0), + (2689, 2690, 0), + (2748, 2748, 0), + (2753, 2757, 0), + (2759, 2760, 0), + (2765, 2765, 0), + (2786, 2787, 0), + (2810, 2815, 0), + (2817, 2817, 0), + (2876, 2876, 0), + (2879, 2879, 0), + (2881, 2884, 0), + (2893, 2893, 0), + (2901, 2902, 0), + (2914, 2915, 0), + (2946, 2946, 0), + (3008, 3008, 0), + (3021, 3021, 0), + (3072, 3072, 0), + (3076, 3076, 0), + (3132, 3132, 0), + (3134, 3136, 0), + (3142, 3144, 0), + (3146, 3149, 0), + (3157, 3158, 0), + (3170, 3171, 0), + (3201, 3201, 0), + (3260, 3260, 0), + (3263, 3263, 0), + (3270, 3270, 0), + (3276, 3277, 0), + (3298, 3299, 0), + (3328, 3329, 0), + (3387, 3388, 0), + (3393, 3396, 0), + (3405, 3405, 0), + (3426, 3427, 0), + (3457, 3457, 0), + (3530, 3530, 0), + (3538, 3540, 0), + (3542, 3542, 0), + (3633, 3633, 0), + (3636, 3642, 0), + (3655, 3662, 0), + (3761, 3761, 0), + (3764, 3772, 0), + (3784, 3790, 0), + (3864, 3865, 0), + (3893, 3893, 0), + (3895, 3895, 0), + (3897, 3897, 0), + (3953, 3966, 0), + (3968, 3972, 0), + (3974, 3975, 0), + (3981, 3991, 0), + (3993, 4028, 0), + (4038, 4038, 0), + (4141, 4144, 0), + (4146, 4151, 0), + (4153, 4154, 0), + (4157, 4158, 0), + (4184, 4185, 0), + (4190, 4192, 0), + 
(4209, 4212, 0), + (4226, 4226, 0), + (4229, 4230, 0), + (4237, 4237, 0), + (4253, 4253, 0), + (4352, 4447, 2), + (4957, 4959, 0), + (5906, 5908, 0), + (5938, 5939, 0), + (5970, 5971, 0), + (6002, 6003, 0), + (6068, 6069, 0), + (6071, 6077, 0), + (6086, 6086, 0), + (6089, 6099, 0), + (6109, 6109, 0), + (6155, 6157, 0), + (6159, 6159, 0), + (6277, 6278, 0), + (6313, 6313, 0), + (6432, 6434, 0), + (6439, 6440, 0), + (6450, 6450, 0), + (6457, 6459, 0), + (6679, 6680, 0), + (6683, 6683, 0), + (6742, 6742, 0), + (6744, 6750, 0), + (6752, 6752, 0), + (6754, 6754, 0), + (6757, 6764, 0), + (6771, 6780, 0), + (6783, 6783, 0), + (6832, 6862, 0), + (6912, 6915, 0), + (6964, 6964, 0), + (6966, 6970, 0), + (6972, 6972, 0), + (6978, 6978, 0), + (7019, 7027, 0), + (7040, 7041, 0), + (7074, 7077, 0), + (7080, 7081, 0), + (7083, 7085, 0), + (7142, 7142, 0), + (7144, 7145, 0), + (7149, 7149, 0), + (7151, 7153, 0), + (7212, 7219, 0), + (7222, 7223, 0), + (7376, 7378, 0), + (7380, 7392, 0), + (7394, 7400, 0), + (7405, 7405, 0), + (7412, 7412, 0), + (7416, 7417, 0), + (7616, 7679, 0), + (8203, 8207, 0), + (8232, 8238, 0), + (8288, 8291, 0), + (8400, 8432, 0), + (8986, 8987, 2), + (9001, 9002, 2), + (9193, 9196, 2), + (9200, 9200, 2), + (9203, 9203, 2), + (9725, 9726, 2), + (9748, 9749, 2), + (9800, 9811, 2), + (9855, 9855, 2), + (9875, 9875, 2), + (9889, 9889, 2), + (9898, 9899, 2), + (9917, 9918, 2), + (9924, 9925, 2), + (9934, 9934, 2), + (9940, 9940, 2), + (9962, 9962, 2), + (9970, 9971, 2), + (9973, 9973, 2), + (9978, 9978, 2), + (9981, 9981, 2), + (9989, 9989, 2), + (9994, 9995, 2), + (10024, 10024, 2), + (10060, 10060, 2), + (10062, 10062, 2), + (10067, 10069, 2), + (10071, 10071, 2), + (10133, 10135, 2), + (10160, 10160, 2), + (10175, 10175, 2), + (11035, 11036, 2), + (11088, 11088, 2), + (11093, 11093, 2), + (11503, 11505, 0), + (11647, 11647, 0), + (11744, 11775, 0), + (11904, 11929, 2), + (11931, 12019, 2), + (12032, 12245, 2), + (12272, 12283, 2), + (12288, 12329, 2), + (12330, 12333, 0), + (12334, 12350, 2), + (12353, 12438, 2), + (12441, 12442, 0), + (12443, 12543, 2), + (12549, 12591, 2), + (12593, 12686, 2), + (12688, 12771, 2), + (12784, 12830, 2), + (12832, 12871, 2), + (12880, 19903, 2), + (19968, 42124, 2), + (42128, 42182, 2), + (42607, 42610, 0), + (42612, 42621, 0), + (42654, 42655, 0), + (42736, 42737, 0), + (43010, 43010, 0), + (43014, 43014, 0), + (43019, 43019, 0), + (43045, 43046, 0), + (43052, 43052, 0), + (43204, 43205, 0), + (43232, 43249, 0), + (43263, 43263, 0), + (43302, 43309, 0), + (43335, 43345, 0), + (43360, 43388, 2), + (43392, 43394, 0), + (43443, 43443, 0), + (43446, 43449, 0), + (43452, 43453, 0), + (43493, 43493, 0), + (43561, 43566, 0), + (43569, 43570, 0), + (43573, 43574, 0), + (43587, 43587, 0), + (43596, 43596, 0), + (43644, 43644, 0), + (43696, 43696, 0), + (43698, 43700, 0), + (43703, 43704, 0), + (43710, 43711, 0), + (43713, 43713, 0), + (43756, 43757, 0), + (43766, 43766, 0), + (44005, 44005, 0), + (44008, 44008, 0), + (44013, 44013, 0), + (44032, 55203, 2), + (63744, 64255, 2), + (64286, 64286, 0), + (65024, 65039, 0), + (65040, 65049, 2), + (65056, 65071, 0), + (65072, 65106, 2), + (65108, 65126, 2), + (65128, 65131, 2), + (65281, 65376, 2), + (65504, 65510, 2), + (66045, 66045, 0), + (66272, 66272, 0), + (66422, 66426, 0), + (68097, 68099, 0), + (68101, 68102, 0), + (68108, 68111, 0), + (68152, 68154, 0), + (68159, 68159, 0), + (68325, 68326, 0), + (68900, 68903, 0), + (69291, 69292, 0), + (69373, 69375, 0), + (69446, 69456, 0), + (69506, 69509, 0), + 
(69633, 69633, 0), + (69688, 69702, 0), + (69744, 69744, 0), + (69747, 69748, 0), + (69759, 69761, 0), + (69811, 69814, 0), + (69817, 69818, 0), + (69826, 69826, 0), + (69888, 69890, 0), + (69927, 69931, 0), + (69933, 69940, 0), + (70003, 70003, 0), + (70016, 70017, 0), + (70070, 70078, 0), + (70089, 70092, 0), + (70095, 70095, 0), + (70191, 70193, 0), + (70196, 70196, 0), + (70198, 70199, 0), + (70206, 70206, 0), + (70209, 70209, 0), + (70367, 70367, 0), + (70371, 70378, 0), + (70400, 70401, 0), + (70459, 70460, 0), + (70464, 70464, 0), + (70502, 70508, 0), + (70512, 70516, 0), + (70712, 70719, 0), + (70722, 70724, 0), + (70726, 70726, 0), + (70750, 70750, 0), + (70835, 70840, 0), + (70842, 70842, 0), + (70847, 70848, 0), + (70850, 70851, 0), + (71090, 71093, 0), + (71100, 71101, 0), + (71103, 71104, 0), + (71132, 71133, 0), + (71219, 71226, 0), + (71229, 71229, 0), + (71231, 71232, 0), + (71339, 71339, 0), + (71341, 71341, 0), + (71344, 71349, 0), + (71351, 71351, 0), + (71453, 71455, 0), + (71458, 71461, 0), + (71463, 71467, 0), + (71727, 71735, 0), + (71737, 71738, 0), + (71995, 71996, 0), + (71998, 71998, 0), + (72003, 72003, 0), + (72148, 72151, 0), + (72154, 72155, 0), + (72160, 72160, 0), + (72193, 72202, 0), + (72243, 72248, 0), + (72251, 72254, 0), + (72263, 72263, 0), + (72273, 72278, 0), + (72281, 72283, 0), + (72330, 72342, 0), + (72344, 72345, 0), + (72752, 72758, 0), + (72760, 72765, 0), + (72767, 72767, 0), + (72850, 72871, 0), + (72874, 72880, 0), + (72882, 72883, 0), + (72885, 72886, 0), + (73009, 73014, 0), + (73018, 73018, 0), + (73020, 73021, 0), + (73023, 73029, 0), + (73031, 73031, 0), + (73104, 73105, 0), + (73109, 73109, 0), + (73111, 73111, 0), + (73459, 73460, 0), + (73472, 73473, 0), + (73526, 73530, 0), + (73536, 73536, 0), + (73538, 73538, 0), + (78912, 78912, 0), + (78919, 78933, 0), + (92912, 92916, 0), + (92976, 92982, 0), + (94031, 94031, 0), + (94095, 94098, 0), + (94176, 94179, 2), + (94180, 94180, 0), + (94192, 94193, 2), + (94208, 100343, 2), + (100352, 101589, 2), + (101632, 101640, 2), + (110576, 110579, 2), + (110581, 110587, 2), + (110589, 110590, 2), + (110592, 110882, 2), + (110898, 110898, 2), + (110928, 110930, 2), + (110933, 110933, 2), + (110948, 110951, 2), + (110960, 111355, 2), + (113821, 113822, 0), + (118528, 118573, 0), + (118576, 118598, 0), + (119143, 119145, 0), + (119163, 119170, 0), + (119173, 119179, 0), + (119210, 119213, 0), + (119362, 119364, 0), + (121344, 121398, 0), + (121403, 121452, 0), + (121461, 121461, 0), + (121476, 121476, 0), + (121499, 121503, 0), + (121505, 121519, 0), + (122880, 122886, 0), + (122888, 122904, 0), + (122907, 122913, 0), + (122915, 122916, 0), + (122918, 122922, 0), + (123023, 123023, 0), + (123184, 123190, 0), + (123566, 123566, 0), + (123628, 123631, 0), + (124140, 124143, 0), + (125136, 125142, 0), + (125252, 125258, 0), + (126980, 126980, 2), + (127183, 127183, 2), + (127374, 127374, 2), + (127377, 127386, 2), + (127488, 127490, 2), + (127504, 127547, 2), + (127552, 127560, 2), + (127568, 127569, 2), + (127584, 127589, 2), + (127744, 127776, 2), + (127789, 127797, 2), + (127799, 127868, 2), + (127870, 127891, 2), + (127904, 127946, 2), + (127951, 127955, 2), + (127968, 127984, 2), + (127988, 127988, 2), + (127992, 128062, 2), + (128064, 128064, 2), + (128066, 128252, 2), + (128255, 128317, 2), + (128331, 128334, 2), + (128336, 128359, 2), + (128378, 128378, 2), + (128405, 128406, 2), + (128420, 128420, 2), + (128507, 128591, 2), + (128640, 128709, 2), + (128716, 128716, 2), + (128720, 128722, 
2), + (128725, 128727, 2), + (128732, 128735, 2), + (128747, 128748, 2), + (128756, 128764, 2), + (128992, 129003, 2), + (129008, 129008, 2), + (129292, 129338, 2), + (129340, 129349, 2), + (129351, 129535, 2), + (129648, 129660, 2), + (129664, 129672, 2), + (129680, 129725, 2), + (129727, 129733, 2), + (129742, 129755, 2), + (129760, 129768, 2), + (129776, 129784, 2), + (131072, 196605, 2), + (196608, 262141, 2), + (917760, 917999, 0), +] diff --git a/py311/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..eec539ea5d7ee9d75cafb191d7f4c1dd0577d7e0 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/brackets.py b/py311/lib/python3.11/site-packages/black/brackets.py new file mode 100644 index 0000000000000000000000000000000000000000..44a3c9a2946fe65762dc41f5697f3a48a99f6ff5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/brackets.py @@ -0,0 +1,383 @@ +"""Builds on top of nodes.py to track brackets.""" + +from collections.abc import Iterable, Sequence +from dataclasses import dataclass, field +from typing import Final, Union + +from black.nodes import ( + BRACKET, + CLOSING_BRACKETS, + COMPARATORS, + LOGIC_OPERATORS, + MATH_OPERATORS, + OPENING_BRACKETS, + UNPACKING_PARENTS, + VARARGS_PARENTS, + is_vararg, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] +Depth = int +LeafID = int +NodeType = int +Priority = int + + +COMPREHENSION_PRIORITY: Final = 20 +COMMA_PRIORITY: Final = 18 +TERNARY_PRIORITY: Final = 16 +LOGIC_PRIORITY: Final = 14 +STRING_PRIORITY: Final = 12 +COMPARATOR_PRIORITY: Final = 10 +MATH_PRIORITIES: Final = { + token.VBAR: 9, + token.CIRCUMFLEX: 8, + token.AMPER: 7, + token.LEFTSHIFT: 6, + token.RIGHTSHIFT: 6, + token.PLUS: 5, + token.MINUS: 5, + token.STAR: 4, + token.SLASH: 4, + token.DOUBLESLASH: 4, + token.PERCENT: 4, + token.AT: 4, + token.TILDE: 3, + token.DOUBLESTAR: 2, +} +DOT_PRIORITY: Final = 1 + + +class BracketMatchError(Exception): + """Raised when an opening bracket is unable to be matched to a closing bracket.""" + + +@dataclass +class BracketTracker: + """Keeps track of brackets on a line.""" + + depth: int = 0 + bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict) + delimiters: dict[LeafID, Priority] = field(default_factory=dict) + previous: Leaf | None = None + _for_loop_depths: list[int] = field(default_factory=list) + _lambda_argument_depths: list[int] = field(default_factory=list) + invisible: list[Leaf] = field(default_factory=list) + + def mark(self, leaf: Leaf) -> None: + """Mark `leaf` with bracket-related metadata. Keep track of delimiters. + + All leaves receive an int `bracket_depth` field that stores how deep + within brackets a given leaf is. 0 means there are no enclosing brackets + that started on this line. + + If a leaf is itself a closing bracket and there is a matching opening + bracket earlier, it receives an `opening_bracket` field with which it forms a + pair. This is a one-directional link to avoid reference cycles. Closing + bracket without opening happens on lines continued from previous + breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place + the return type annotation on its own line of the previous closing RPAR. 
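+
+        For instance, in `f(a, [b, c])` the leaf `f` has `bracket_depth` 0,
+        `a` has 1, and `b` and `c` have 2.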
+ + If a leaf is a delimiter (a token on which Black can split the line if + needed) and it's on depth 0, its `id()` is stored in the tracker's + `delimiters` field. + """ + if leaf.type == token.COMMENT: + return + + if ( + self.depth == 0 + and leaf.type in CLOSING_BRACKETS + and (self.depth, leaf.type) not in self.bracket_match + ): + return + + self.maybe_decrement_after_for_loop_variable(leaf) + self.maybe_decrement_after_lambda_arguments(leaf) + if leaf.type in CLOSING_BRACKETS: + self.depth -= 1 + try: + opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) + except KeyError as e: + raise BracketMatchError( + "Unable to match a closing bracket to the following opening" + f" bracket: {leaf}" + ) from e + leaf.opening_bracket = opening_bracket + if not leaf.value: + self.invisible.append(leaf) + leaf.bracket_depth = self.depth + if self.depth == 0: + delim = is_split_before_delimiter(leaf, self.previous) + if delim and self.previous is not None: + self.delimiters[id(self.previous)] = delim + else: + delim = is_split_after_delimiter(leaf) + if delim: + self.delimiters[id(leaf)] = delim + if leaf.type in OPENING_BRACKETS: + self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf + self.depth += 1 + if not leaf.value: + self.invisible.append(leaf) + self.previous = leaf + self.maybe_increment_lambda_arguments(leaf) + self.maybe_increment_for_loop_variable(leaf) + + def any_open_for_or_lambda(self) -> bool: + """Return True if there is an open for or lambda expression on the line. + + See maybe_increment_for_loop_variable and maybe_increment_lambda_arguments + for details.""" + return bool(self._for_loop_depths or self._lambda_argument_depths) + + def any_open_brackets(self) -> bool: + """Return True if there is an yet unmatched open bracket on the line.""" + return bool(self.bracket_match) + + def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: + """Return the highest priority of a delimiter found on the line. + + Values are consistent with what `is_split_*_delimiter()` return. + Raises ValueError on no delimiters. + """ + return max(v for k, v in self.delimiters.items() if k not in exclude) + + def delimiter_count_with_priority(self, priority: Priority = 0) -> int: + """Return the number of delimiters with the given `priority`. + + If no `priority` is passed, defaults to max priority on the line. + """ + if not self.delimiters: + return 0 + + priority = priority or self.max_delimiter_priority() + return sum(1 for p in self.delimiters.values() if p == priority) + + def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: + """In a for loop, or comprehension, the variables are often unpacks. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `for` and `in`. + """ + if leaf.type == token.NAME and leaf.value == "for": + self.depth += 1 + self._for_loop_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: + """See `maybe_increment_for_loop_variable` above for explanation.""" + if ( + self._for_loop_depths + and self._for_loop_depths[-1] == self.depth + and leaf.type == token.NAME + and leaf.value == "in" + ): + self.depth -= 1 + self._for_loop_depths.pop() + return True + + return False + + def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: + """In a lambda expression, there might be more than one argument. 
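+        For example, `lambda x, y=0: x` contains a comma between `lambda` and
+        `:` that must not become a split point.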
+ + To avoid splitting on the comma in this situation, increase the depth of + tokens between `lambda` and `:`. + """ + if leaf.type == token.NAME and leaf.value == "lambda": + self.depth += 1 + self._lambda_argument_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: + """See `maybe_increment_lambda_arguments` above for explanation.""" + if ( + self._lambda_argument_depths + and self._lambda_argument_depths[-1] == self.depth + and leaf.type == token.COLON + ): + self.depth -= 1 + self._lambda_argument_depths.pop() + return True + + return False + + def get_open_lsqb(self) -> Leaf | None: + """Return the most recent opening square bracket (if any).""" + return self.bracket_match.get((self.depth - 1, token.RSQB)) + + +def is_split_after_delimiter(leaf: Leaf) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break after it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break after themselves. + + Higher numbers are higher priority. + """ + if leaf.type == token.COMMA: + return COMMA_PRIORITY + + return 0 + + +def is_split_before_delimiter(leaf: Leaf, previous: Leaf | None = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break before it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break before themselves. + + Higher numbers are higher priority. + """ + if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): + # * and ** might also be MATH_OPERATORS but in this case they are not. + # Don't treat them as a delimiter. + return 0 + + if ( + leaf.type == token.DOT + and leaf.parent + and leaf.parent.type not in {syms.import_from, syms.dotted_name} + and (previous is None or previous.type in CLOSING_BRACKETS) + ): + return DOT_PRIORITY + + if ( + leaf.type in MATH_OPERATORS + and leaf.parent + and leaf.parent.type not in {syms.factor, syms.star_expr} + ): + return MATH_PRIORITIES[leaf.type] + + if leaf.type in COMPARATORS: + return COMPARATOR_PRIORITY + + if ( + leaf.type == token.STRING + and previous is not None + and previous.type == token.STRING + ): + return STRING_PRIORITY + + if leaf.type not in {token.NAME, token.ASYNC}: + return 0 + + if ( + leaf.value == "for" + and leaf.parent + and leaf.parent.type in {syms.comp_for, syms.old_comp_for} + or leaf.type == token.ASYNC + ): + if ( + not isinstance(leaf.prev_sibling, Leaf) + or leaf.prev_sibling.value != "async" + ): + return COMPREHENSION_PRIORITY + + if ( + leaf.value == "if" + and leaf.parent + and leaf.parent.type in {syms.comp_if, syms.old_comp_if} + ): + return COMPREHENSION_PRIORITY + + if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: + return TERNARY_PRIORITY + + if leaf.value == "is": + return COMPARATOR_PRIORITY + + if ( + leaf.value == "in" + and leaf.parent + and leaf.parent.type in {syms.comp_op, syms.comparison} + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "not" + ) + ): + return COMPARATOR_PRIORITY + + if ( + leaf.value == "not" + and leaf.parent + and leaf.parent.type == syms.comp_op + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "is" + ) + ): + return COMPARATOR_PRIORITY + + if leaf.value in LOGIC_OPERATORS and leaf.parent: + return LOGIC_PRIORITY + + return 0 + + +def max_delimiter_priority_in_atom(node: LN) -> Priority: + """Return maximum delimiter 
priority inside `node`.
+
+    This is specific to atoms with contents contained in a pair of parentheses.
+    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+    """
+    if node.type != syms.atom:
+        return 0
+
+    first = node.children[0]
+    last = node.children[-1]
+    if not (first.type == token.LPAR and last.type == token.RPAR):
+        return 0
+
+    bt = BracketTracker()
+    for c in node.children[1:-1]:
+        if isinstance(c, Leaf):
+            bt.mark(c)
+        else:
+            for leaf in c.leaves():
+                bt.mark(leaf)
+    try:
+        return bt.max_delimiter_priority()
+
+    except ValueError:
+        return 0
+
+
+def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
+    """Return leaves that are inside matching brackets.
+
+    The input `leaves` can have non-matching brackets at the head or tail parts.
+    Matching brackets are included.
+    """
+    try:
+        # Start with the first opening bracket and ignore closing brackets before.
+        start_index = next(
+            i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS
+        )
+    except StopIteration:
+        return set()
+    bracket_stack = []
+    ids = set()
+    for i in range(start_index, len(leaves)):
+        leaf = leaves[i]
+        if leaf.type in OPENING_BRACKETS:
+            bracket_stack.append((BRACKET[leaf.type], i))
+        if leaf.type in CLOSING_BRACKETS:
+            if bracket_stack and leaf.type == bracket_stack[-1][0]:
+                _, start = bracket_stack.pop()
+                for j in range(start, i + 1):
+                    ids.add(id(leaves[j]))
+            else:
+                break
+    return ids
diff --git a/py311/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2aef25e1277f5dfb8fe7256257a7407ac6550574
Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so differ
diff --git a/py311/lib/python3.11/site-packages/black/cache.py b/py311/lib/python3.11/site-packages/black/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef9d99a7b901c772c4076b8b13c030d325900c61
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/black/cache.py
@@ -0,0 +1,150 @@
+"""Caching of formatted files with feature-based invalidation."""
+
+import hashlib
+import os
+import pickle
+import sys
+import tempfile
+from collections.abc import Iterable
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import NamedTuple
+
+from platformdirs import user_cache_dir
+
+from _black_version import version as __version__
+from black.mode import Mode
+from black.output import err
+
+if sys.version_info >= (3, 11):
+    from typing import Self
+else:
+    from typing_extensions import Self
+
+
+class FileData(NamedTuple):
+    st_mtime: float
+    st_size: int
+    hash: str
+
+
+def get_cache_dir() -> Path:
+    """Get the cache directory used by black.
+
+    Users can customize this directory on all systems using the `BLACK_CACHE_DIR`
+    environment variable. By default, the cache directory is the user cache directory
+    under the black application.
+
+    This result is immediately set to a constant `black.cache.CACHE_DIR` so as to
+    avoid repeated calls.
+    """
+    # NOTE: Function mostly exists as a clean way to test getting the cache directory.
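+    # Illustrative layout (platform dependent): on Linux this typically resolves
+    # to ~/.cache/black/<version>/, unless BLACK_CACHE_DIR overrides it.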
+    default_cache_dir = user_cache_dir("black")
+    cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
+    cache_dir = cache_dir / __version__
+    return cache_dir
+
+
+CACHE_DIR = get_cache_dir()
+
+
+def get_cache_file(mode: Mode) -> Path:
+    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
+
+
+@dataclass
+class Cache:
+    mode: Mode
+    cache_file: Path
+    file_data: dict[str, FileData] = field(default_factory=dict)
+
+    @classmethod
+    def read(cls, mode: Mode) -> Self:
+        """Read the cache if it exists and is well-formed.
+
+        If it is not well-formed, the call to write later should
+        resolve the issue.
+        """
+        cache_file = get_cache_file(mode)
+        try:
+            exists = cache_file.exists()
+        except OSError as e:
+            # Likely file too long; see #4172 and #4174
+            err(f"Unable to read cache file {cache_file} due to {e}")
+            return cls(mode, cache_file)
+        if not exists:
+            return cls(mode, cache_file)
+
+        with cache_file.open("rb") as fobj:
+            try:
+                data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
+                file_data = {k: FileData(*v) for k, v in data.items()}
+            except (pickle.UnpicklingError, ValueError, IndexError):
+                return cls(mode, cache_file)
+
+        return cls(mode, cache_file, file_data)
+
+    @staticmethod
+    def hash_digest(path: Path) -> str:
+        """Return hash digest for path."""
+
+        data = path.read_bytes()
+        return hashlib.sha256(data).hexdigest()
+
+    @staticmethod
+    def get_file_data(path: Path) -> FileData:
+        """Return file data for path."""
+
+        stat = path.stat()
+        hash = Cache.hash_digest(path)
+        return FileData(stat.st_mtime, stat.st_size, hash)
+
+    def is_changed(self, source: Path) -> bool:
+        """Check if source has changed compared to cached version."""
+        res_src = source.resolve()
+        old = self.file_data.get(str(res_src))
+        if old is None:
+            return True
+
+        st = res_src.stat()
+        if st.st_size != old.st_size:
+            return True
+        if st.st_mtime != old.st_mtime:
+            new_hash = Cache.hash_digest(res_src)
+            if new_hash != old.hash:
+                return True
+        return False
+
+    def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
+        """Split an iterable of paths in `sources` into two sets.
+
+        The first contains paths of files that were modified on disk or are not
+        in the cache. The other contains paths to non-modified files.
+        """
+        changed: set[Path] = set()
+        done: set[Path] = set()
+        for src in sources:
+            if self.is_changed(src):
+                changed.add(src)
+            else:
+                done.add(src)
+        return changed, done
+
+    def write(self, sources: Iterable[Path]) -> None:
+        """Update the cache file data and write a new cache file."""
+        self.file_data.update(
+            **{str(src.resolve()): Cache.get_file_data(src) for src in sources}
+        )
+        try:
+            CACHE_DIR.mkdir(parents=True, exist_ok=True)
+            with tempfile.NamedTemporaryFile(
+                dir=str(self.cache_file.parent), delete=False
+            ) as f:
+                # We store raw tuples in the cache because it's faster.
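+                # FileData is a NamedTuple, so ``(*v,)`` flattens each entry back
+                # into a plain (st_mtime, st_size, hash) tuple before pickling.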
+ data: dict[str, tuple[float, int, str]] = { + k: (*v,) for k, v in self.file_data.items() + } + pickle.dump(data, f, protocol=4) + os.replace(f.name, self.cache_file) + except OSError: + pass diff --git a/py311/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a647e1c179494dae2e5f093ed853c8d6c19a3812 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/comments.py b/py311/lib/python3.11/site-packages/black/comments.py new file mode 100644 index 0000000000000000000000000000000000000000..c4c8f799b9737f3dbe5770d6d5486e2b813f19b8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/comments.py @@ -0,0 +1,839 @@ +import re +from collections.abc import Collection, Iterator +from dataclasses import dataclass +from functools import lru_cache +from typing import Final, Union + +from black.mode import Mode, Preview +from black.nodes import ( + CLOSING_BRACKETS, + STANDALONE_COMMENT, + STATEMENT, + WHITESPACE, + container_of, + first_leaf_of, + is_type_comment_string, + make_simple_prefix, + preceding_leaf, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] + +FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"} +FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} +FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} + +# Compound statements we care about for fmt: skip handling +# (excludes except_clause and case_block which aren't standalone compound statements) +_COMPOUND_STATEMENTS: Final = STATEMENT - {syms.except_clause, syms.case_block} + +COMMENT_EXCEPTIONS = " !:#'" +_COMMENT_PREFIX = "# " +_COMMENT_LIST_SEPARATOR = ";" + + +@dataclass +class ProtoComment: + """Describes a piece of syntax that is a comment. + + It's not a :class:`blib2to3.pytree.Leaf` so that: + + * it can be cached (`Leaf` objects should not be reused more than once as + they store their lineno, column, prefix, and parent information); + * `newlines` and `consumed` fields are kept separate from the `value`. This + simplifies handling of special marker comments like ``# fmt: off/on``. + """ + + type: int # token.COMMENT or STANDALONE_COMMENT + value: str # content of the comment + newlines: int # how many newlines before the comment + consumed: int # how many characters of the original leaf's prefix did we consume + form_feed: bool # is there a form feed before the comment + leading_whitespace: str # leading whitespace before the comment, if any + + +def generate_comments(leaf: LN, mode: Mode) -> Iterator[Leaf]: + """Clean the prefix of the `leaf` and generate comments from it, if any. + + Comments in lib2to3 are shoved into the whitespace prefix. This happens + in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation + move because it does away with modifying the grammar to include all the + possible places in which comments can be placed. + + The sad consequence for us though is that comments don't "belong" anywhere. + This is why this function generates simple parentless Leaf objects for + comments. We simply don't know what the correct parent should be. + + No matter though, we can live without this. We really only need to + differentiate between inline and standalone comments. 
The latter don't
+    share the line with any code.
+
+    Inline comments are emitted as regular token.COMMENT leaves. Standalone
+    comments are emitted with a fake STANDALONE_COMMENT token identifier.
+    """
+    total_consumed = 0
+    for pc in list_comments(
+        leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, mode=mode
+    ):
+        total_consumed = pc.consumed
+        prefix = make_simple_prefix(pc.newlines, pc.form_feed)
+        yield Leaf(pc.type, pc.value, prefix=prefix)
+    normalize_trailing_prefix(leaf, total_consumed)
+
+
+@lru_cache(maxsize=4096)
+def list_comments(prefix: str, *, is_endmarker: bool, mode: Mode) -> list[ProtoComment]:
+    """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
+    result: list[ProtoComment] = []
+    if not prefix or "#" not in prefix:
+        return result
+
+    consumed = 0
+    nlines = 0
+    ignored_lines = 0
+    form_feed = False
+    for index, full_line in enumerate(re.split("\r?\n|\r", prefix)):
+        consumed += len(full_line) + 1  # adding the length of the split '\n'
+        match = re.match(r"^(\s*)(\S.*|)$", full_line)
+        assert match
+        whitespace, line = match.groups()
+        if not line:
+            nlines += 1
+        if "\f" in full_line:
+            form_feed = True
+        if not line.startswith("#"):
+            # Escaped newlines outside of a comment are not really newlines at
+            # all. We treat a single-line comment following an escaped newline
+            # as a simple trailing comment.
+            if line.endswith("\\"):
+                ignored_lines += 1
+            continue
+
+        if index == ignored_lines and not is_endmarker:
+            comment_type = token.COMMENT  # simple trailing comment
+        else:
+            comment_type = STANDALONE_COMMENT
+        comment = make_comment(line, mode=mode)
+        result.append(
+            ProtoComment(
+                type=comment_type,
+                value=comment,
+                newlines=nlines,
+                consumed=consumed,
+                form_feed=form_feed,
+                leading_whitespace=whitespace,
+            )
+        )
+        form_feed = False
+        nlines = 0
+    return result
+
+
+def normalize_trailing_prefix(leaf: LN, total_consumed: int) -> None:
+    """Normalize the prefix that's left over after generating comments.
+
+    Note: don't use backslashes for formatting or you'll lose your voting rights.
+    """
+    remainder = leaf.prefix[total_consumed:]
+    if "\\" not in remainder:
+        nl_count = remainder.count("\n")
+        form_feed = "\f" in remainder and remainder.endswith("\n")
+        leaf.prefix = make_simple_prefix(nl_count, form_feed)
+        return
+
+    leaf.prefix = ""
+
+
+def make_comment(content: str, mode: Mode) -> str:
+    """Return a consistently formatted comment from the given `content` string.
+
+    All comments (except for "##", "#!", "#:", '#'") should have a single
+    space between the hash sign and the content.
+
+    If `content` didn't start with a hash sign, one is provided.
+
+    Comments containing fmt directives are preserved exactly as-is to respect
+    user intent (e.g., `#no space # fmt: skip` stays as-is).
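+
+    Illustrative examples: ``#comment`` becomes ``# comment``, while
+    ``#!shebang`` keeps its shape because ``!`` is listed in COMMENT_EXCEPTIONS.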
+ """ + content = content.rstrip() + if not content: + return "#" + + # Preserve comments with fmt directives exactly as-is + if content.startswith("#") and contains_fmt_directive(content): + return content + + if content[0] == "#": + content = content[1:] + if ( + content + and content[0] == "\N{NO-BREAK SPACE}" + and not is_type_comment_string("# " + content.lstrip(), mode=mode) + ): + content = " " + content[1:] # Replace NBSP by a simple space + if ( + Preview.standardize_type_comments in mode + and content + and "\N{NO-BREAK SPACE}" not in content + and is_type_comment_string("#" + content, mode=mode) + ): + type_part, value_part = content.split(":", 1) + content = type_part.strip() + ": " + value_part.strip() + + if content and content[0] not in COMMENT_EXCEPTIONS: + content = " " + content + return "#" + content + + +def normalize_fmt_off( + node: Node, mode: Mode, lines: Collection[tuple[int, int]] +) -> None: + """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" + try_again = True + while try_again: + try_again = convert_one_fmt_off_pair(node, mode, lines) + + +def _should_process_fmt_comment( + comment: ProtoComment, leaf: Leaf +) -> tuple[bool, bool, bool]: + """Check if comment should be processed for fmt handling. + + Returns (should_process, is_fmt_off, is_fmt_skip). + """ + is_fmt_off = contains_fmt_directive(comment.value, FMT_OFF) + is_fmt_skip = contains_fmt_directive(comment.value, FMT_SKIP) + + if not is_fmt_off and not is_fmt_skip: + return False, False, False + + # Invalid use when `# fmt: off` is applied before a closing bracket + if is_fmt_off and leaf.type in CLOSING_BRACKETS: + return False, False, False + + return True, is_fmt_off, is_fmt_skip + + +def _is_valid_standalone_fmt_comment( + comment: ProtoComment, leaf: Leaf, is_fmt_off: bool, is_fmt_skip: bool +) -> bool: + """Check if comment is a valid standalone fmt directive. + + We only want standalone comments. If there's no previous leaf or if + the previous leaf is indentation, it's a standalone comment in disguise. + """ + if comment.type == STANDALONE_COMMENT: + return True + + prev = preceding_leaf(leaf) + if not prev: + return True + + # Treat STANDALONE_COMMENT nodes as whitespace for check + if is_fmt_off and prev.type not in WHITESPACE and prev.type != STANDALONE_COMMENT: + return False + if is_fmt_skip and prev.type in WHITESPACE: + return False + + return True + + +def _handle_comment_only_fmt_block( + leaf: Leaf, + comment: ProtoComment, + previous_consumed: int, + mode: Mode, +) -> bool: + """Handle fmt:off/on blocks that contain only comments. + + Returns True if a block was converted, False otherwise. 
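+
+    An example of such a block, which lives entirely in a single leaf's prefix::
+
+        # fmt: off
+        # ascii-art diagram kept verbatim
+        # fmt: on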
+ """ + all_comments = list_comments(leaf.prefix, is_endmarker=False, mode=mode) + + # Find the first fmt:off and its matching fmt:on + fmt_off_idx = None + fmt_on_idx = None + for idx, c in enumerate(all_comments): + if fmt_off_idx is None and contains_fmt_directive(c.value, FMT_OFF): + fmt_off_idx = idx + if ( + fmt_off_idx is not None + and idx > fmt_off_idx + and contains_fmt_directive(c.value, FMT_ON) + ): + fmt_on_idx = idx + break + + # Only proceed if we found both directives + if fmt_on_idx is None or fmt_off_idx is None: + return False + + comment = all_comments[fmt_off_idx] + fmt_on_comment = all_comments[fmt_on_idx] + original_prefix = leaf.prefix + + # Build the hidden value + start_pos = comment.consumed + end_pos = fmt_on_comment.consumed + content_between_and_fmt_on = original_prefix[start_pos:end_pos] + hidden_value = comment.value + "\n" + content_between_and_fmt_on + + if hidden_value.endswith("\n"): + hidden_value = hidden_value[:-1] + + # Build the standalone comment prefix - preserve all content before fmt:off + # including any comments that precede it + if fmt_off_idx == 0: + # No comments before fmt:off, use previous_consumed + pre_fmt_off_consumed = previous_consumed + else: + # Use the consumed position of the last comment before fmt:off + # This preserves all comments and content before the fmt:off directive + pre_fmt_off_consumed = all_comments[fmt_off_idx - 1].consumed + + standalone_comment_prefix = ( + original_prefix[:pre_fmt_off_consumed] + "\n" * comment.newlines + ) + + fmt_off_prefix = original_prefix.split(comment.value)[0] + if "\n" in fmt_off_prefix: + fmt_off_prefix = fmt_off_prefix.split("\n")[-1] + standalone_comment_prefix += fmt_off_prefix + + # Update leaf prefix + leaf.prefix = original_prefix[fmt_on_comment.consumed :] + + # Insert the STANDALONE_COMMENT + parent = leaf.parent + assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (prefix only)" + + leaf_idx = None + for idx, child in enumerate(parent.children): + if child is leaf: + leaf_idx = idx + break + + assert leaf_idx is not None, "INTERNAL ERROR: fmt: on/off handling (leaf index)" + + parent.insert_child( + leaf_idx, + Leaf( + STANDALONE_COMMENT, + hidden_value, + prefix=standalone_comment_prefix, + fmt_pass_converted_first_leaf=None, + ), + ) + return True + + +def convert_one_fmt_off_pair( + node: Node, mode: Mode, lines: Collection[tuple[int, int]] +) -> bool: + """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. + + Returns True if a pair was converted. 
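+
+    For example, everything between the two directives below is collapsed into
+    one STANDALONE_COMMENT leaf, so later formatting passes leave it untouched::
+
+        # fmt: off
+        matrix = [
+            1, 0,
+            0, 1,
+        ]
+        # fmt: on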
+ """ + for leaf in node.leaves(): + # Skip STANDALONE_COMMENT nodes that were created by fmt:off/on/skip processing + # to avoid reprocessing them in subsequent iterations + if leaf.type == STANDALONE_COMMENT and hasattr( + leaf, "fmt_pass_converted_first_leaf" + ): + continue + + previous_consumed = 0 + for comment in list_comments(leaf.prefix, is_endmarker=False, mode=mode): + should_process, is_fmt_off, is_fmt_skip = _should_process_fmt_comment( + comment, leaf + ) + if not should_process: + previous_consumed = comment.consumed + continue + + if not _is_valid_standalone_fmt_comment( + comment, leaf, is_fmt_off, is_fmt_skip + ): + previous_consumed = comment.consumed + continue + + ignored_nodes = list(generate_ignored_nodes(leaf, comment, mode)) + + # Handle comment-only blocks + if not ignored_nodes and is_fmt_off: + if _handle_comment_only_fmt_block( + leaf, comment, previous_consumed, mode + ): + return True + continue + + # Need actual nodes to process + if not ignored_nodes: + continue + + # Handle regular fmt blocks + + _handle_regular_fmt_block( + ignored_nodes, + comment, + previous_consumed, + is_fmt_skip, + lines, + leaf, + ) + return True + + return False + + +def _handle_regular_fmt_block( + ignored_nodes: list[LN], + comment: ProtoComment, + previous_consumed: int, + is_fmt_skip: bool, + lines: Collection[tuple[int, int]], + leaf: Leaf, +) -> None: + """Handle fmt blocks with actual AST nodes.""" + first = ignored_nodes[0] # Can be a container node with the `leaf`. + parent = first.parent + prefix = first.prefix + + if contains_fmt_directive(comment.value, FMT_OFF): + first.prefix = prefix[comment.consumed :] + if is_fmt_skip: + first.prefix = "" + standalone_comment_prefix = prefix + else: + standalone_comment_prefix = prefix[:previous_consumed] + "\n" * comment.newlines + + # Ensure STANDALONE_COMMENT nodes have trailing newlines when stringified + # This prevents multiple fmt: skip comments from being concatenated on one line + parts = [] + for node in ignored_nodes: + if isinstance(node, Leaf) and node.type == STANDALONE_COMMENT: + # Add newline after STANDALONE_COMMENT Leaf + node_str = str(node) + if not node_str.endswith("\n"): + node_str += "\n" + parts.append(node_str) + elif isinstance(node, Node): + # For nodes that might contain STANDALONE_COMMENT leaves, + # we need custom stringify + has_standalone = any( + leaf.type == STANDALONE_COMMENT for leaf in node.leaves() + ) + if has_standalone: + # Stringify node with STANDALONE_COMMENT leaves having trailing newlines + def stringify_node(n: LN) -> str: + if isinstance(n, Leaf): + if n.type == STANDALONE_COMMENT: + result = n.prefix + n.value + if not result.endswith("\n"): + result += "\n" + return result + return str(n) + else: + # For nested nodes, recursively process children + return "".join(stringify_node(child) for child in n.children) + + parts.append(stringify_node(node)) + else: + parts.append(str(node)) + else: + parts.append(str(node)) + + hidden_value = "".join(parts) + comment_lineno = leaf.lineno - comment.newlines + + if contains_fmt_directive(comment.value, FMT_OFF): + fmt_off_prefix = "" + if len(lines) > 0 and not any( + line[0] <= comment_lineno <= line[1] for line in lines + ): + # keeping indentation of comment by preserving original whitespaces. 
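+            # (split the prefix at the comment text, then keep only the
+            # indentation on the comment's own line)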
+ fmt_off_prefix = prefix.split(comment.value)[0] + if "\n" in fmt_off_prefix: + fmt_off_prefix = fmt_off_prefix.split("\n")[-1] + standalone_comment_prefix += fmt_off_prefix + hidden_value = comment.value + "\n" + hidden_value + + if is_fmt_skip: + hidden_value += comment.leading_whitespace + comment.value + + if hidden_value.endswith("\n"): + # That happens when one of the `ignored_nodes` ended with a NEWLINE + # leaf (possibly followed by a DEDENT). + hidden_value = hidden_value[:-1] + + first_idx: int | None = None + for ignored in ignored_nodes: + index = ignored.remove() + if first_idx is None: + first_idx = index + + assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" + assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" + + parent.insert_child( + first_idx, + Leaf( + STANDALONE_COMMENT, + hidden_value, + prefix=standalone_comment_prefix, + fmt_pass_converted_first_leaf=first_leaf_of(first), + ), + ) + + +def generate_ignored_nodes( + leaf: Leaf, comment: ProtoComment, mode: Mode +) -> Iterator[LN]: + """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. + + If comment is skip, returns leaf only. + Stops at the end of the block. + """ + if contains_fmt_directive(comment.value, FMT_SKIP): + yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, mode) + return + container: LN | None = container_of(leaf) + while container is not None and container.type != token.ENDMARKER: + if is_fmt_on(container, mode=mode): + return + + # fix for fmt: on in children + if children_contains_fmt_on(container, mode=mode): + for index, child in enumerate(container.children): + if isinstance(child, Leaf) and is_fmt_on(child, mode=mode): + if child.type in CLOSING_BRACKETS: + # This means `# fmt: on` is placed at a different bracket level + # than `# fmt: off`. This is an invalid use, but as a courtesy, + # we include this closing bracket in the ignored nodes. + # The alternative is to fail the formatting. + yield child + return + if ( + child.type == token.INDENT + and index < len(container.children) - 1 + and children_contains_fmt_on( + container.children[index + 1], mode=mode + ) + ): + # This means `# fmt: on` is placed right after an indentation + # level, and we shouldn't swallow the previous INDENT token. + return + if children_contains_fmt_on(child, mode=mode): + return + yield child + else: + if container.type == token.DEDENT and container.next_sibling is None: + # This can happen when there is no matching `# fmt: on` comment at the + # same level as `# fmt: on`. We need to keep this DEDENT. + return + yield container + container = container.next_sibling + + +def _find_compound_statement_context(parent: Node) -> Node | None: + """Return the body node of a compound statement if we should respect fmt: skip. + + This handles one-line compound statements like: + if condition: body # fmt: skip + + When Black expands such statements, they temporarily look like: + if condition: + body # fmt: skip + + In both cases, we want to return the body node (either the simple_stmt directly + or the suite containing it). + """ + if parent.type != syms.simple_stmt: + return None + + if not isinstance(parent.parent, Node): + return None + + # Case 1: Expanded form after Black's initial formatting pass. 
+ # The one-liner has been split across multiple lines: + # if True: + # print("a"); print("b") # fmt: skip + # Structure: compound_stmt -> suite -> simple_stmt + if ( + parent.parent.type == syms.suite + and isinstance(parent.parent.parent, Node) + and parent.parent.parent.type in _COMPOUND_STATEMENTS + ): + return parent.parent + + # Case 2: Original one-line form from the input source. + # The statement is still on a single line: + # if True: print("a"); print("b") # fmt: skip + # Structure: compound_stmt -> simple_stmt + if parent.parent.type in _COMPOUND_STATEMENTS: + return parent + + return None + + +def _should_keep_compound_statement_inline( + body_node: Node, simple_stmt_parent: Node +) -> bool: + """Check if a compound statement should be kept on one line. + + Returns True only for compound statements with semicolon-separated bodies, + like: if True: print("a"); print("b") # fmt: skip + """ + # Check if there are semicolons in the body + for leaf in body_node.leaves(): + if leaf.type == token.SEMI: + # Verify it's a single-line body (one simple_stmt) + if body_node.type == syms.suite: + # After formatting: check suite has one simple_stmt child + simple_stmts = [ + child + for child in body_node.children + if child.type == syms.simple_stmt + ] + return len(simple_stmts) == 1 and simple_stmts[0] is simple_stmt_parent + else: + # Original form: body_node IS the simple_stmt + return body_node is simple_stmt_parent + return False + + +def _get_compound_statement_header( + body_node: Node, simple_stmt_parent: Node +) -> list[LN]: + """Get header nodes for a compound statement that should be preserved inline.""" + if not _should_keep_compound_statement_inline(body_node, simple_stmt_parent): + return [] + + # Get the compound statement (parent of body) + compound_stmt = body_node.parent + if compound_stmt is None or compound_stmt.type not in _COMPOUND_STATEMENTS: + return [] + + # Collect all header leaves before the body + header_leaves: list[LN] = [] + for child in compound_stmt.children: + if child is body_node: + break + if isinstance(child, Leaf): + if child.type not in (token.NEWLINE, token.INDENT): + header_leaves.append(child) + else: + header_leaves.extend(child.leaves()) + return header_leaves + + +def _generate_ignored_nodes_from_fmt_skip( + leaf: Leaf, comment: ProtoComment, mode: Mode +) -> Iterator[LN]: + """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`.""" + prev_sibling = leaf.prev_sibling + parent = leaf.parent + ignored_nodes: list[LN] = [] + # Need to properly format the leaf prefix to compare it to comment.value, + # which is also formatted + comments = list_comments(leaf.prefix, is_endmarker=False, mode=mode) + if not comments or comment.value != comments[0].value: + return + + if Preview.fix_fmt_skip_in_one_liners in mode and not prev_sibling and parent: + prev_sibling = parent.prev_sibling + + if prev_sibling is not None: + leaf.prefix = leaf.prefix[comment.consumed :] + + if Preview.fix_fmt_skip_in_one_liners not in mode: + siblings = [prev_sibling] + while ( + "\n" not in prev_sibling.prefix + and prev_sibling.prev_sibling is not None + ): + prev_sibling = prev_sibling.prev_sibling + siblings.insert(0, prev_sibling) + yield from siblings + return + + # Generates the nodes to be ignored by `fmt: skip`. + + # Nodes to ignore are the ones on the same line as the + # `# fmt: skip` comment, excluding the `# fmt: skip` + # node itself. + + # Traversal process (starting at the `# fmt: skip` node): + # 1. 
Move to the `prev_sibling` of the current node.
+        # 2. If `prev_sibling` has children, go to its rightmost leaf.
+        # 3. If there's no `prev_sibling`, move up to the parent
+        #    node and repeat.
+        # 4. Continue until:
+        #    a. You encounter an `INDENT` or `NEWLINE` node (indicates
+        #       start of the line).
+        #    b. You reach the root node.
+
+        # Include all visited LEAVES in the ignored list, except INDENT
+        # or NEWLINE leaves.
+
+        current_node = prev_sibling
+        ignored_nodes = [current_node]
+        if current_node.prev_sibling is None and current_node.parent is not None:
+            current_node = current_node.parent
+
+        # Track seen nodes to detect cycles that can occur after tree modifications
+        seen_nodes = {id(current_node)}
+
+        while "\n" not in current_node.prefix and current_node.prev_sibling is not None:
+            leaf_nodes = list(current_node.prev_sibling.leaves())
+            next_node = leaf_nodes[-1] if leaf_nodes else current_node
+
+            # Detect infinite loops - if we've seen this node before, stop.
+            # This can happen when STANDALONE_COMMENT nodes are inserted
+            # during processing.
+            if id(next_node) in seen_nodes:
+                break
+
+            current_node = next_node
+            seen_nodes.add(id(current_node))
+
+            # Stop if we encounter a STANDALONE_COMMENT created by fmt processing
+            if (
+                isinstance(current_node, Leaf)
+                and current_node.type == STANDALONE_COMMENT
+                and hasattr(current_node, "fmt_pass_converted_first_leaf")
+            ):
+                break
+
+            if (
+                current_node.type in CLOSING_BRACKETS
+                and current_node.parent
+                and current_node.parent.type == syms.atom
+            ):
+                current_node = current_node.parent
+
+            if current_node.type in (token.NEWLINE, token.INDENT):
+                current_node.prefix = ""
+                break
+
+            if current_node.type == token.DEDENT:
+                break
+
+            # Special case for with expressions
+            # Without this, we can get stuck inside the asexpr_test's
+            # children's children
+            if (
+                current_node.parent
+                and current_node.parent.type == syms.asexpr_test
+                and current_node.parent.parent
+                and current_node.parent.parent.type == syms.with_stmt
+            ):
+                current_node = current_node.parent
+
+            ignored_nodes.insert(0, current_node)
+
+            if current_node.prev_sibling is None and current_node.parent is not None:
+                current_node = current_node.parent
+
+        # Special handling for compound statements with semicolon-separated bodies
+        if Preview.fix_fmt_skip_in_one_liners in mode and isinstance(parent, Node):
+            body_node = _find_compound_statement_context(parent)
+            if body_node is not None:
+                header_nodes = _get_compound_statement_header(body_node, parent)
+                if header_nodes:
+                    ignored_nodes = header_nodes + ignored_nodes
+
+        yield from ignored_nodes
+    elif (
+        parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
+    ):
+        # The `# fmt: skip` is on the colon line of the if/while/def/class/...
+        # statements. The ignored nodes should be previous siblings of the
+        # parent suite node.
+        leaf.prefix = ""
+        parent_sibling = parent.prev_sibling
+        while parent_sibling is not None and parent_sibling.type != syms.suite:
+            ignored_nodes.insert(0, parent_sibling)
+            parent_sibling = parent_sibling.prev_sibling
+        # Special case for `async_stmt` where the ASYNC token is on the
+        # grandparent node.
+        grandparent = parent.parent
+        if (
+            grandparent is not None
+            and grandparent.prev_sibling is not None
+            and grandparent.prev_sibling.type == token.ASYNC
+        ):
+            ignored_nodes.insert(0, grandparent.prev_sibling)
+        yield from iter(ignored_nodes)
+
+
+def is_fmt_on(container: LN, mode: Mode) -> bool:
+    """Determine whether formatting is switched on within a container.
+ Determined by whether the last `# fmt:` comment is `on` or `off`. + """ + fmt_on = False + for comment in list_comments(container.prefix, is_endmarker=False, mode=mode): + if contains_fmt_directive(comment.value, FMT_ON): + fmt_on = True + elif contains_fmt_directive(comment.value, FMT_OFF): + fmt_on = False + return fmt_on + + +def children_contains_fmt_on(container: LN, mode: Mode) -> bool: + """Determine if children have formatting switched on.""" + for child in container.children: + leaf = first_leaf_of(child) + if leaf is not None and is_fmt_on(leaf, mode=mode): + return True + + return False + + +def contains_pragma_comment(comment_list: list[Leaf]) -> bool: + """ + Returns: + True iff one of the comments in @comment_list is a pragma used by one + of the more common static analysis tools for python (e.g. mypy, flake8, + pylint). + """ + for comment in comment_list: + if comment.value.startswith(("# type:", "# noqa", "# pylint:")): + return True + + return False + + +def contains_fmt_directive( + comment_line: str, directives: set[str] = FMT_OFF | FMT_ON | FMT_SKIP +) -> bool: + """ + Checks if the given comment contains format directives, alone or paired with + other comments. + + Defaults to checking all directives (skip, off, on, yapf), but can be + narrowed to specific ones. + + Matching styles: + # foobar <-- single comment + # foobar # foobar # foobar <-- multiple comments + # foobar; foobar <-- list of comments (; separated) + """ + semantic_comment_blocks = [ + comment_line, + *[ + _COMMENT_PREFIX + comment.strip() + for comment in comment_line.split(_COMMENT_PREFIX)[1:] + ], + *[ + _COMMENT_PREFIX + comment.strip() + for comment in comment_line.strip(_COMMENT_PREFIX).split( + _COMMENT_LIST_SEPARATOR + ) + ], + ] + + return any(comment in directives for comment in semantic_comment_blocks) diff --git a/py311/lib/python3.11/site-packages/black/concurrency.py b/py311/lib/python3.11/site-packages/black/concurrency.py new file mode 100644 index 0000000000000000000000000000000000000000..e939dc34681a57cf180c69e8b39999a721da5442 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/concurrency.py @@ -0,0 +1,204 @@ +""" +Formatting many files at once via multiprocessing. Contains entrypoint and utilities. + +NOTE: this module is only imported if we need to format several files at once. +""" + +from __future__ import annotations + +import asyncio +import logging +import os +import signal +import sys +import traceback +from collections.abc import Iterable +from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor +from multiprocessing import Manager +from pathlib import Path +from typing import Any + +from mypy_extensions import mypyc_attr + +from black import WriteBack, format_file_in_place +from black.cache import Cache +from black.mode import Mode +from black.output import err +from black.report import Changed, Report + + +def maybe_install_uvloop() -> None: + """If our environment has uvloop installed we use it. + + This is called only from command-line entry points to avoid + interfering with the parent process if Black is used as a library. 
+    """
+    try:
+        import uvloop
+
+        uvloop.install()
+    except ImportError:
+        pass
+
+
+def cancel(tasks: Iterable[asyncio.Future[Any]]) -> None:
+    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
+    err("Aborted!")
+    for task in tasks:
+        task.cancel()
+
+
+def shutdown(loop: asyncio.AbstractEventLoop) -> None:
+    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
+    try:
+        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
+        to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some when the `shutdown()` happened.
+        # Silence their logger's spew about the event loop being closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
+# diff-shades depends on being able to monkeypatch this function to operate. I know
+# it's not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
+@mypyc_attr(patchable=True)
+def reformat_many(
+    sources: set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: Report,
+    workers: int | None,
+    no_cache: bool = False,
+) -> None:
+    """Reformat multiple files using a ProcessPoolExecutor."""
+    maybe_install_uvloop()
+
+    if workers is None:
+        workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
+        workers = workers or os.cpu_count() or 1
+    if sys.platform == "win32":
+        # Work around https://bugs.python.org/issue26903
+        workers = min(workers, 60)
+
+    executor: Executor | None = None
+    if workers > 1:
+        try:
+            executor = ProcessPoolExecutor(max_workers=workers)
+        except (ImportError, NotImplementedError, OSError):
+            # we arrive here if the underlying system does not support multi-processing
+            # like in AWS Lambda or Termux, in which case we gracefully fall back to
+            # a ThreadPoolExecutor with just a single worker (more workers would not do
+            # us any good due to the Global Interpreter Lock)
+            pass
+
+    if executor is None:
+        executor = ThreadPoolExecutor(max_workers=1)
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    try:
+        loop.run_until_complete(
+            schedule_formatting(
+                sources=sources,
+                fast=fast,
+                write_back=write_back,
+                mode=mode,
+                report=report,
+                loop=loop,
+                executor=executor,
+                no_cache=no_cache,
+            )
+        )
+    finally:
+        try:
+            shutdown(loop)
+        finally:
+            asyncio.set_event_loop(None)
+        if executor is not None:
+            executor.shutdown()
+
+
+async def schedule_formatting(
+    sources: set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: Report,
+    loop: asyncio.AbstractEventLoop,
+    executor: Executor,
+    no_cache: bool = False,
+) -> None:
+    """Run formatting of `sources` in parallel using the provided `executor`.
+
+    (Use ProcessPoolExecutors for actual parallelism.)
+
+    `write_back`, `fast`, and `mode` options are passed to
+    :func:`format_file_in_place`.
+ """ + cache = None if no_cache else Cache.read(mode) + if cache is not None and write_back not in ( + WriteBack.DIFF, + WriteBack.COLOR_DIFF, + ): + sources, cached = cache.filtered_cached(sources) + for src in sorted(cached): + report.done(src, Changed.CACHED) + if not sources: + return + + cancelled = [] + sources_to_cache = [] + lock = None + if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + # For diff output, we need locks to ensure we don't interleave output + # from different processes. + manager = Manager() + lock = manager.Lock() + tasks = { + asyncio.ensure_future( + loop.run_in_executor( + executor, format_file_in_place, src, fast, mode, write_back, lock + ) + ): src + for src in sorted(sources) + } + pending = tasks.keys() + try: + loop.add_signal_handler(signal.SIGINT, cancel, pending) + loop.add_signal_handler(signal.SIGTERM, cancel, pending) + except NotImplementedError: + # There are no good alternatives for these on Windows. + pass + while pending: + done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + for task in done: + src = tasks.pop(task) + if task.cancelled(): + cancelled.append(task) + elif exc := task.exception(): + if report.verbose: + traceback.print_exception(type(exc), exc, exc.__traceback__) + report.failed(src, str(exc)) + else: + changed = Changed.YES if task.result() else Changed.NO + # If the file was written back or was successfully checked as + # well-formatted, store this information in the cache. + if write_back is WriteBack.YES or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + sources_to_cache.append(src) + report.done(src, changed) + if cancelled: + await asyncio.gather(*cancelled, return_exceptions=True) + if sources_to_cache and not no_cache and cache is not None: + cache.write(sources_to_cache) diff --git a/py311/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b1222881e6f95e3bb82f8e81a36bc36edb7c2f4d Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/const.py b/py311/lib/python3.11/site-packages/black/const.py new file mode 100644 index 0000000000000000000000000000000000000000..ee466679c7074caf936b1e2707b82468e8d83432 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/const.py @@ -0,0 +1,4 @@ +DEFAULT_LINE_LENGTH = 88 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.ipynb_checkpoints|\.mypy_cache|\.nox|\.pytest_cache|\.ruff_cache|\.tox|\.svn|\.venv|\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/" # noqa: B950 +DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" +STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/py311/lib/python3.11/site-packages/black/debug.py b/py311/lib/python3.11/site-packages/black/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..f051c497d0b601e89461332ad17661c643e181f2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/debug.py @@ -0,0 +1,55 @@ +from collections.abc import Iterator +from dataclasses import dataclass, field +from typing import Any, TypeVar, Union + +from black.nodes import Visitor +from black.output import out +from black.parsing import lib2to3_parse +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node, type_repr + +LN = Union[Leaf, Node] +T = TypeVar("T") + + +@dataclass +class 
DebugVisitor(Visitor[T]): + tree_depth: int = 0 + list_output: list[str] = field(default_factory=list) + print_output: bool = True + + def out(self, message: str, *args: Any, **kwargs: Any) -> None: + self.list_output.append(message) + if self.print_output: + out(message, *args, **kwargs) + + def visit_default(self, node: LN) -> Iterator[T]: + indent = " " * (2 * self.tree_depth) + if isinstance(node, Node): + _type = type_repr(node.type) + self.out(f"{indent}{_type}", fg="yellow") + self.tree_depth += 1 + for child in node.children: + yield from self.visit(child) + + self.tree_depth -= 1 + self.out(f"{indent}/{_type}", fg="yellow", bold=False) + else: + _type = token.tok_name.get(node.type, str(node.type)) + self.out(f"{indent}{_type}", fg="blue", nl=False) + if node.prefix: + # We don't have to handle prefixes for `Node` objects since + # that delegates to the first child anyway. + self.out(f" {node.prefix!r}", fg="green", bold=False, nl=False) + self.out(f" {node.value!r}", fg="blue", bold=False) + + @classmethod + def show(cls, code: str | Leaf | Node) -> None: + """Pretty-print the lib2to3 AST of a given string of `code`. + + Convenience method for debugging. + """ + v: DebugVisitor[None] = DebugVisitor() + if isinstance(code, str): + code = lib2to3_parse(code) + list(v.visit(code)) diff --git a/py311/lib/python3.11/site-packages/black/files.py b/py311/lib/python3.11/site-packages/black/files.py new file mode 100644 index 0000000000000000000000000000000000000000..21ad7bc2ae63a5a721d614158e1b8b0a34d27e87 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/files.py @@ -0,0 +1,426 @@ +import io +import os +import sys +from collections.abc import Iterable, Iterator, Sequence +from functools import lru_cache +from pathlib import Path +from re import Pattern +from typing import TYPE_CHECKING, Any, Union + +from mypy_extensions import mypyc_attr +from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet +from packaging.version import InvalidVersion, Version +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +if sys.version_info >= (3, 11): + try: + import tomllib + except ImportError: + # Help users on older alphas + if not TYPE_CHECKING: + import tomli as tomllib +else: + import tomli as tomllib + +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from black.mode import TargetVersion +from black.output import err +from black.report import Report + +if TYPE_CHECKING: + import colorama + + +@lru_cache +def _load_toml(path: Path | str) -> dict[str, Any]: + with open(path, "rb") as f: + return tomllib.load(f) + + +@lru_cache +def _cached_resolve(path: Path) -> Path: + return path.resolve() + + +@lru_cache +def find_project_root( + srcs: Sequence[str], stdin_filename: str | None = None +) -> tuple[Path, str]: + """Return a directory containing .git, .hg, or pyproject.toml. + + pyproject.toml files are only considered if they contain a [tool.black] + section and are ignored otherwise. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. + + Returns a two-tuple with the first element as the project root path and + the second element as a string describing the method by which the + project root was discovered. 
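+
+    For example, run from inside a checkout whose top level contains ``.git``,
+    something like ``(Path("/path/to/repo"), ".git directory")`` is returned
+    (the concrete path is illustrative).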
+ """ + if stdin_filename is not None: + srcs = tuple(stdin_filename if s == "-" else s for s in srcs) + if not srcs: + srcs = [str(_cached_resolve(Path.cwd()))] + + path_srcs = [_cached_resolve(Path(Path.cwd(), src)) for src in srcs] + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory, ".git directory" + + if (directory / ".hg").is_dir(): + return directory, ".hg directory" + + if (directory / "pyproject.toml").is_file(): + pyproject_toml = _load_toml(directory / "pyproject.toml") + if "black" in pyproject_toml.get("tool", {}): + return directory, "pyproject.toml" + + return directory, "file system root" + + +def find_pyproject_toml( + path_search_start: tuple[str, ...], stdin_filename: str | None = None +) -> str | None: + """Find the absolute filepath to a pyproject.toml if it exists""" + path_project_root, _ = find_project_root(path_search_start, stdin_filename) + path_pyproject_toml = path_project_root / "pyproject.toml" + if path_pyproject_toml.is_file(): + return str(path_pyproject_toml) + + try: + path_user_pyproject_toml = find_user_pyproject_toml() + return ( + str(path_user_pyproject_toml) + if path_user_pyproject_toml.is_file() + else None + ) + except (PermissionError, RuntimeError) as e: + # We do not have access to the user-level config directory, so ignore it. + err(f"Ignoring user configuration directory due to {e!r}") + return None + + +@mypyc_attr(patchable=True) +def parse_pyproject_toml(path_config: str) -> dict[str, Any]: + """Parse a pyproject toml file, pulling out relevant parts for Black. + + If parsing fails, will raise a tomllib.TOMLDecodeError. + """ + pyproject_toml = _load_toml(path_config) + config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {}) + config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} + + if "target_version" not in config: + inferred_target_version = infer_target_version(pyproject_toml) + if inferred_target_version is not None: + config["target_version"] = [v.name.lower() for v in inferred_target_version] + + return config + + +def infer_target_version( + pyproject_toml: dict[str, Any], +) -> list[TargetVersion] | None: + """Infer Black's target version from the project metadata in pyproject.toml. + + Supports the PyPA standard format (PEP 621): + https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python + + If the target version cannot be inferred, returns None. + """ + project_metadata = pyproject_toml.get("project", {}) + requires_python = project_metadata.get("requires-python", None) + if requires_python is not None: + try: + return parse_req_python_version(requires_python) + except InvalidVersion: + pass + try: + return parse_req_python_specifier(requires_python) + except (InvalidSpecifier, InvalidVersion): + pass + + return None + + +def parse_req_python_version(requires_python: str) -> list[TargetVersion] | None: + """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion. + + If parsing fails, will raise a packaging.version.InvalidVersion error. + If the parsed version cannot be mapped to a valid TargetVersion, returns None. 
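+
+    For example, ``"3.7"`` maps to ``[TargetVersion.PY37]``, while ``"2.7"``
+    yields None because only the 3.x series has corresponding target versions.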
+ """ + version = Version(requires_python) + if version.release[0] != 3: + return None + try: + return [TargetVersion(version.release[1])] + except (IndexError, ValueError): + return None + + +def parse_req_python_specifier(requires_python: str) -> list[TargetVersion] | None: + """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion. + + If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error. + If the parsed specifier cannot be mapped to a valid TargetVersion, returns None. + """ + specifier_set = strip_specifier_set(SpecifierSet(requires_python)) + if not specifier_set: + return None + + target_version_map = {f"3.{v.value}": v for v in TargetVersion} + compatible_versions: list[str] = list(specifier_set.filter(target_version_map)) + if compatible_versions: + return [target_version_map[v] for v in compatible_versions] + return None + + +def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet: + """Strip minor versions for some specifiers in the specifier set. + + For background on version specifiers, see PEP 440: + https://peps.python.org/pep-0440/#version-specifiers + """ + specifiers = [] + for s in specifier_set: + if "*" in str(s): + specifiers.append(s) + elif s.operator in ["~=", "==", ">=", "==="]: + version = Version(s.version) + stripped = Specifier(f"{s.operator}{version.major}.{version.minor}") + specifiers.append(stripped) + elif s.operator == ">": + version = Version(s.version) + if len(version.release) > 2: + s = Specifier(f">={version.major}.{version.minor}") + specifiers.append(s) + else: + specifiers.append(s) + + return SpecifierSet(",".join(str(s) for s in specifiers)) + + +@lru_cache +def find_user_pyproject_toml() -> Path: + r"""Return the path to the top-level user configuration for black. + + This looks for ~\.black on Windows and ~/.config/black on Linux and other + Unix systems. + + May raise: + - RuntimeError: if the current user has no homedir + - PermissionError: if the current process cannot access the user's homedir + """ + if sys.platform == "win32": + # Windows + user_config_path = Path.home() / ".black" + else: + config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") + user_config_path = Path(config_root).expanduser() / "black" + return _cached_resolve(user_config_path) + + +@lru_cache +def get_gitignore(root: Path) -> PathSpec: + """Return a PathSpec matching gitignore content if present.""" + gitignore = root / ".gitignore" + lines: list[str] = [] + if gitignore.is_file(): + with gitignore.open(encoding="utf-8") as gf: + lines = gf.readlines() + try: + return PathSpec.from_lines("gitwildmatch", lines) + except GitWildMatchPatternError as e: + err(f"Could not parse {gitignore}: {e}") + raise + + +def resolves_outside_root_or_cannot_stat( + path: Path, + root: Path, + report: Report | None = None, +) -> bool: + """ + Returns whether the path is a symbolic link that points outside the + root directory. Also returns True if we failed to resolve the path. 
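+
+    For example, a symlink inside `root` that resolves to a location outside of
+    `root` returns True, as does any path whose resolution raises an OSError.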
+ """ + try: + resolved_path = _cached_resolve(path) + except OSError as e: + if report: + report.path_ignored(path, f"cannot be read because {e}") + return True + try: + resolved_path.relative_to(root) + except ValueError: + if report: + report.path_ignored(path, f"is a symbolic link that points outside {root}") + return True + return False + + +def best_effort_relative_path(path: Path, root: Path) -> Path: + # Precondition: resolves_outside_root_or_cannot_stat(path, root) is False + try: + return path.absolute().relative_to(root) + except ValueError: + pass + root_parent = next((p for p in path.parents if _cached_resolve(p) == root), None) + if root_parent is not None: + return path.relative_to(root_parent) + # something adversarial, fallback to path guaranteed by precondition + return _cached_resolve(path).relative_to(root) + + +def _path_is_ignored( + root_relative_path: str, + root: Path, + gitignore_dict: dict[Path, PathSpec], +) -> bool: + path = root / root_relative_path + # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must + # ensure that gitignore_dict is ordered from least specific to most specific. + for gitignore_path, pattern in gitignore_dict.items(): + try: + relative_path = path.relative_to(gitignore_path).as_posix() + if path.is_dir(): + relative_path = relative_path + "/" + except ValueError: + break + if pattern.match_file(relative_path): + return True + return False + + +def path_is_excluded( + normalized_path: str, + pattern: Pattern[str] | None, +) -> bool: + match = pattern.search(normalized_path) if pattern else None + return bool(match and match.group(0)) + + +def gen_python_files( + paths: Iterable[Path], + root: Path, + include: Pattern[str], + exclude: Pattern[str], + extend_exclude: Pattern[str] | None, + force_exclude: Pattern[str] | None, + report: Report, + gitignore_dict: dict[Path, PathSpec] | None, + *, + verbose: bool, + quiet: bool, +) -> Iterator[Path]: + """Generate all files under `path` whose paths are not excluded by the + `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, + but are included by the `include` regex. + + Symbolic links pointing outside of the `root` directory are ignored. + + `report` is where output about exclusions goes. + """ + + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + for child in paths: + assert child.is_absolute() + root_relative_path = child.relative_to(root).as_posix() + + # First ignore files matching .gitignore, if passed + if gitignore_dict and _path_is_ignored( + root_relative_path, root, gitignore_dict + ): + report.path_ignored(child, "matches a .gitignore file content") + continue + + # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. + root_relative_path = "/" + root_relative_path + if child.is_dir(): + root_relative_path += "/" + + if path_is_excluded(root_relative_path, exclude): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + if path_is_excluded(root_relative_path, extend_exclude): + report.path_ignored( + child, "matches the --extend-exclude regular expression" + ) + continue + + if path_is_excluded(root_relative_path, force_exclude): + report.path_ignored(child, "matches the --force-exclude regular expression") + continue + + if resolves_outside_root_or_cannot_stat(child, root, report): + continue + + if child.is_dir(): + # If gitignore is None, gitignore usage is disabled, while a Falsey + # gitignore is when the directory doesn't have a .gitignore file. 
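+            # (get_gitignore returns an empty PathSpec when no .gitignore file
+            # exists; an empty spec matches nothing, keeping the recursion uniform)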
+            if gitignore_dict is not None:
+                new_gitignore_dict = {
+                    **gitignore_dict,
+                    root / child: get_gitignore(child),
+                }
+            else:
+                new_gitignore_dict = None
+            yield from gen_python_files(
+                child.iterdir(),
+                root,
+                include,
+                exclude,
+                extend_exclude,
+                force_exclude,
+                report,
+                new_gitignore_dict,
+                verbose=verbose,
+                quiet=quiet,
+            )
+
+        elif child.is_file():
+            if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
+                warn=verbose or not quiet
+            ):
+                continue
+            include_match = include.search(root_relative_path) if include else True
+            if include_match:
+                yield child
+
+
+def wrap_stream_for_windows(
+    f: io.TextIOWrapper,
+) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
+    """
+    Wrap stream with colorama's wrap_stream so colors are shown on Windows.
+
+    If `colorama` is unavailable, the original stream is returned unmodified.
+    Otherwise, the `wrap_stream()` function determines whether the stream needs
+    to be wrapped for a Windows environment and will accordingly either return
+    an `AnsiToWin32` wrapper or the original stream.
+    """
+    try:
+        from colorama.initialise import wrap_stream
+    except ImportError:
+        return f
+    else:
+        # Set `strip=False` to avoid needing to modify test_express_diff_with_color.
+        return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
diff --git a/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..431014cdf92010fad372d6db94108002718fadd8
Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so differ
diff --git a/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.py b/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.py
new file mode 100644
index 0000000000000000000000000000000000000000..c84fe6219fba92603d2264070040cd60e5e0e97a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/black/handle_ipynb_magics.py
@@ -0,0 +1,502 @@
+"""Functions to process IPython magics with."""
+
+import ast
+import collections
+import dataclasses
+import re
+import secrets
+from functools import lru_cache
+from importlib.util import find_spec
+from typing import TypeGuard
+
+from black.mode import Mode
+from black.output import out
+from black.report import NothingChanged
+
+TRANSFORMED_MAGICS = frozenset((
+    "get_ipython().run_cell_magic",
+    "get_ipython().system",
+    "get_ipython().getoutput",
+    "get_ipython().run_line_magic",
+))
+TOKENS_TO_IGNORE = frozenset((
+    "ENDMARKER",
+    "NL",
+    "NEWLINE",
+    "COMMENT",
+    "DEDENT",
+    "UNIMPORTANT_WS",
+    "ESCAPED_NL",
+))
+PYTHON_CELL_MAGICS = frozenset((
+    "capture",
+    "prun",
+    "pypy",
+    "python",
+    "python3",
+    "time",
+    "timeit",
+))
+
+
+@dataclasses.dataclass(frozen=True)
+class Replacement:
+    mask: str
+    src: str
+
+
+@lru_cache
+def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
+    installed = (
+        find_spec("tokenize_rt") is not None and find_spec("IPython") is not None
+    )
+    if not installed and warn:
+        msg = (
+            "Skipping .ipynb files as Jupyter dependencies are not installed.\n"
+            'You can fix this by running ``pip install "black[jupyter]"``'
+        )
+        out(msg)
+    return installed
+
+
+def validate_cell(src: str, mode: Mode) -> None:
+    r"""Check that cell does not already contain TransformerManager transformations,
+    or non-Python cell magics, which might cause tokenize_rt to break because of 
+ indentations. + + If a cell contains ``!ls``, then it'll be transformed to + ``get_ipython().system('ls')``. However, if the cell originally contained + ``get_ipython().system('ls')``, then it would get transformed in the same way: + + >>> TransformerManager().transform_cell("get_ipython().system('ls')") + "get_ipython().system('ls')\n" + >>> TransformerManager().transform_cell("!ls") + "get_ipython().system('ls')\n" + + Due to the impossibility of safely roundtripping in such situations, cells + containing transformed magics will be ignored. + """ + if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS): + raise NothingChanged + + line = _get_code_start(src) + if line.startswith("%%") and ( + line.split(maxsplit=1)[0][2:] + not in PYTHON_CELL_MAGICS | mode.python_cell_magics + ): + raise NothingChanged + + +def remove_trailing_semicolon(src: str) -> tuple[str, bool]: + """Remove trailing semicolon from Jupyter notebook cell. + + For example, + + fig, ax = plt.subplots() + ax.plot(x_data, y_data); # plot data + + would become + + fig, ax = plt.subplots() + ax.plot(x_data, y_data) # plot data + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + trailing_semicolon = False + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + if token.name == "OP" and token.src == ";": + del tokens[idx] + trailing_semicolon = True + break + if not trailing_semicolon: + return src, False + return tokens_to_src(tokens), True + + +def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str: + """Put trailing semicolon back if cell originally had it. + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + if not has_trailing_semicolon: + return src + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + tokens[idx] = token._replace(src=token.src + ";") + break + else: # pragma: nocover + raise AssertionError( + "INTERNAL ERROR: Was not able to reinstate trailing semicolon. " + "Please report a bug on https://github.com/psf/black/issues. " + ) from None + return str(tokens_to_src(tokens)) + + +def mask_cell(src: str) -> tuple[str, list[Replacement]]: + """Mask IPython magics so content becomes parseable Python code. + + For example, + + %matplotlib inline + 'foo' + + becomes + + b"25716f358c32750" + 'foo' + + The replacements are returned, along with the transformed code. + """ + replacements: list[Replacement] = [] + try: + ast.parse(src) + except SyntaxError: + # Might have IPython magics, will process below. + pass + else: + # Syntax is fine, nothing to mask, early return. + return src, replacements + + from IPython.core.inputtransformer2 import TransformerManager + + transformer_manager = TransformerManager() + # A side effect of the following transformation is that it also removes any + # empty lines at the beginning of the cell. 
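+    # Illustrative sketch of the round-trip this masking enables (the token is
+    # random; the value shown here is hypothetical):
+    #
+    #     masked, repls = mask_cell("%matplotlib inline\n'foo'")
+    #     masked                      -> 'b"96bf08695f0d92f"\n\'foo\''
+    #     unmask_cell(masked, repls)  -> the original cell source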
+    transformed = transformer_manager.transform_cell(src)
+    transformed, cell_magic_replacements = replace_cell_magics(transformed)
+    replacements += cell_magic_replacements
+    transformed = transformer_manager.transform_cell(transformed)
+    transformed, magic_replacements = replace_magics(transformed)
+    if len(transformed.strip().splitlines()) != len(src.strip().splitlines()):
+        # Multi-line magic, not supported.
+        raise NothingChanged
+    replacements += magic_replacements
+    return transformed, replacements
+
+
+def create_token(n_chars: int) -> str:
+    """Create a randomly generated token that is n_chars characters long."""
+    assert n_chars > 0
+    n_bytes = max(n_chars // 2 - 1, 1)
+    token = secrets.token_hex(n_bytes)
+    if len(token) + 3 > n_chars:
+        token = token[:-1]
+    # We use a bytestring so that the string does not get interpreted
+    # as a docstring.
+    return f'b"{token}"'
+
+
+def get_token(src: str, magic: str) -> str:
+    """Return randomly generated token to mask IPython magic with.
+
+    For example, if 'magic' was `%matplotlib inline`, then a possible
+    token to mask it with would be `"43fdd17f7e5ddc83"`. The token
+    will be the same length as the magic, and we make sure that it was
+    not already present anywhere else in the cell.
+    """
+    assert magic
+    n_chars = len(magic)
+    token = create_token(n_chars)
+    counter = 0
+    while token in src:
+        token = create_token(n_chars)
+        counter += 1
+        if counter > 100:
+            raise AssertionError(
+                "INTERNAL ERROR: Black was not able to replace IPython magic. "
+                "Please report a bug on https://github.com/psf/black/issues. "
+                f"The magic might be helpful: {magic}"
+            ) from None
+    return token
+
+
+def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
+    r"""Replace cell magic with token.
+
+    Note that 'src' will already have been processed by IPython's
+    TransformerManager().transform_cell.
+
+    Example,
+
+        get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\n')
+
+    becomes
+
+        "a794."
+        ls =!ls
+
+    The replacement, along with the transformed code, is returned.
+    """
+    replacements: list[Replacement] = []
+
+    tree = ast.parse(src)
+
+    cell_magic_finder = CellMagicFinder()
+    cell_magic_finder.visit(tree)
+    if cell_magic_finder.cell_magic is None:
+        return src, replacements
+    header = cell_magic_finder.cell_magic.header
+    mask = get_token(src, header)
+    replacements.append(Replacement(mask=mask, src=header))
+    return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
+
+
+def replace_magics(src: str) -> tuple[str, list[Replacement]]:
+    """Replace magics within body of cell.
+
+    Note that 'src' will already have been processed by IPython's
+    TransformerManager().transform_cell.
+
+    Example, this
+
+        get_ipython().run_line_magic('matplotlib', 'inline')
+        'foo'
+
+    becomes
+
+        "5e67db56d490fd39"
+        'foo'
+
+    The replacements, along with the transformed code, are returned.
+    """
+    replacements = []
+    magic_finder = MagicFinder()
+    magic_finder.visit(ast.parse(src))
+    new_srcs = []
+    for i, line in enumerate(src.split("\n"), start=1):
+        if i in magic_finder.magics:
+            offsets_and_magics = magic_finder.magics[i]
+            if len(offsets_and_magics) != 1:  # pragma: nocover
+                raise AssertionError(
+                    f"Expecting one magic per line, got: {offsets_and_magics}\n"
+                    "Please report a bug on https://github.com/psf/black/issues."
+ ) + col_offset, magic = ( + offsets_and_magics[0].col_offset, + offsets_and_magics[0].magic, + ) + mask = get_token(src, magic) + replacements.append(Replacement(mask=mask, src=magic)) + line = line[:col_offset] + mask + new_srcs.append(line) + return "\n".join(new_srcs), replacements + + +def unmask_cell(src: str, replacements: list[Replacement]) -> str: + """Remove replacements from cell. + + For example + + "9b20" + foo = bar + + becomes + + %%time + foo = bar + """ + for replacement in replacements: + src = src.replace(replacement.mask, replacement.src) + return src + + +def _get_code_start(src: str) -> str: + """Provides the first line where the code starts. + + Iterates over lines of code until it finds the first line that doesn't + contain only empty spaces and comments. It removes any empty spaces at the + start of the line and returns it. If such line doesn't exist, it returns an + empty string. + """ + for match in re.finditer(".+", src): + line = match.group(0).lstrip() + if line and not line.startswith("#"): + return line + return "" + + +def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]: + """Check if attribute is IPython magic. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + """ + return ( + isinstance(node, ast.Attribute) + and isinstance(node.value, ast.Call) + and isinstance(node.value.func, ast.Name) + and node.value.func.id == "get_ipython" + ) + + +def _get_str_args(args: list[ast.expr]) -> list[str]: + str_args = [] + for arg in args: + assert isinstance(arg, ast.Constant) and isinstance(arg.value, str) + str_args.append(arg.value) + return str_args + + +@dataclasses.dataclass(frozen=True) +class CellMagic: + name: str + params: str | None + body: str + + @property + def header(self) -> str: + if self.params: + return f"%%{self.name} {self.params}" + return f"%%{self.name}" + + +# ast.NodeVisitor + dataclass = breakage under mypyc. +class CellMagicFinder(ast.NodeVisitor): + r"""Find cell magics. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %%time\n + foo() + + would have been transformed to + + get_ipython().run_cell_magic('time', '', 'foo()\n') + + and we look for instances of the latter. + """ + + def __init__(self, cell_magic: CellMagic | None = None) -> None: + self.cell_magic = cell_magic + + def visit_Expr(self, node: ast.Expr) -> None: + """Find cell magic, extract header and body.""" + if ( + isinstance(node.value, ast.Call) + and _is_ipython_magic(node.value.func) + and node.value.func.attr == "run_cell_magic" + ): + args = _get_str_args(node.value.args) + self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2]) + self.generic_visit(node) + + +@dataclasses.dataclass(frozen=True) +class OffsetAndMagic: + col_offset: int + magic: str + + +# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here +# as mypyc will generate broken code. +class MagicFinder(ast.NodeVisitor): + """Visit cell to look for get_ipython calls. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %matplotlib inline + + would have been transformed to + + get_ipython().run_line_magic('matplotlib', 'inline') + + and we look for instances of the latter (and likewise for other + types of magics). 
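+
+    Usage sketch (assuming `src` already holds post-transform source):
+
+        finder = MagicFinder()
+        finder.visit(ast.parse(src))
+        # finder.magics maps line numbers to [OffsetAndMagic(col_offset, "%magic arg")]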
+ """ + + def __init__(self) -> None: + self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list) + + def visit_Assign(self, node: ast.Assign) -> None: + """Look for system assign magics. + + For example, + + black_version = !black --version + env = %env var + + would have been (respectively) transformed to + + black_version = get_ipython().getoutput('black --version') + env = get_ipython().run_line_magic('env', 'var') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "getoutput": + src = f"!{args[0]}" + elif node.value.func.attr == "run_line_magic": + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + else: + raise AssertionError( + f"Unexpected IPython magic {node.value.func.attr!r} found. " + "Please report a bug on https://github.com/psf/black/issues." + ) from None + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) + + def visit_Expr(self, node: ast.Expr) -> None: + """Look for magics in body of cell. + + For examples, + + !ls + !!ls + ?ls + ??ls + + would (respectively) get transformed to + + get_ipython().system('ls') + get_ipython().getoutput('ls') + get_ipython().run_line_magic('pinfo', 'ls') + get_ipython().run_line_magic('pinfo2', 'ls') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "run_line_magic": + if args[0] == "pinfo": + src = f"?{args[1]}" + elif args[0] == "pinfo2": + src = f"??{args[1]}" + else: + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + elif node.value.func.attr == "system": + src = f"!{args[0]}" + elif node.value.func.attr == "getoutput": + src = f"!!{args[0]}" + else: + raise NothingChanged # unsupported magic. + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) diff --git a/py311/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..af3dc2d952f693313600075cb6e6366cd3b81d94 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/linegen.py b/py311/lib/python3.11/site-packages/black/linegen.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6fa30c49dc88675ca58d42cbf2c96acffab104 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/linegen.py @@ -0,0 +1,2035 @@ +""" +Generating lines of code. 
+""" + +import re +import sys +from collections.abc import Collection, Iterator +from dataclasses import replace +from enum import Enum, auto +from functools import partial, wraps +from typing import Union, cast + +from black.brackets import ( + COMMA_PRIORITY, + DOT_PRIORITY, + STRING_PRIORITY, + get_leaves_inside_matching_brackets, + max_delimiter_priority_in_atom, +) +from black.comments import ( + FMT_OFF, + FMT_ON, + contains_fmt_directive, + generate_comments, + list_comments, +) +from black.lines import ( + Line, + RHSResult, + append_leaves, + can_be_split, + can_omit_invisible_parens, + is_line_short_enough, + line_to_string, +) +from black.mode import Feature, Mode, Preview +from black.nodes import ( + ASSIGNMENTS, + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + STATEMENT, + WHITESPACE, + Visitor, + ensure_visible, + fstring_tstring_to_string, + get_annotation_type, + has_sibling_with_type, + is_arith_like, + is_async_stmt_or_funcdef, + is_atom_with_invisible_parens, + is_docstring, + is_empty_tuple, + is_generator, + is_lpar_token, + is_multiline_string, + is_name_token, + is_one_sequence_between, + is_one_tuple, + is_parent_function_or_class, + is_part_of_annotation, + is_rpar_token, + is_stub_body, + is_stub_suite, + is_tuple, + is_tuple_containing_star, + is_tuple_containing_walrus, + is_type_ignore_comment_string, + is_vararg, + is_walrus_assignment, + is_yield, + syms, + wrap_in_parentheses, +) +from black.numerics import normalize_numeric_literal +from black.strings import ( + fix_multiline_docstring, + get_string_prefix, + normalize_string_prefix, + normalize_string_quotes, + normalize_unicode_escape_sequences, +) +from black.trans import ( + CannotTransform, + StringMerger, + StringParenStripper, + StringParenWrapper, + StringSplitter, + Transformer, + hug_power_op, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LeafID = int +LN = Union[Leaf, Node] + + +class CannotSplit(CannotTransform): + """A readable split that fits the allotted line length is impossible.""" + + +# This isn't a dataclass because @dataclass + Generic breaks mypyc. +# See also https://github.com/mypyc/mypyc/issues/827. +class LineGenerator(Visitor[Line]): + """Generates reformatted Line objects. Empty lines are not emitted. + + Note: destroys the tree it's visiting by mutating prefixes of its leaves + in ways that will no longer stringify to valid Python code on the tree. + """ + + def __init__(self, mode: Mode, features: Collection[Feature]) -> None: + self.mode = mode + self.features = features + self.current_line: Line + self.__post_init__() + + def line(self, indent: int = 0) -> Iterator[Line]: + """Generate a line. + + If the line is empty, only emit if it makes sense. + If the line is too long, split it first and then generate. + + If any lines were generated, set up a new current_line. + """ + if not self.current_line: + self.current_line.depth += indent + return # Line is empty, don't emit. Creating a new one unnecessary. + + if len(self.current_line.leaves) == 1 and is_async_stmt_or_funcdef( + self.current_line.leaves[0] + ): + # Special case for async def/for/with statements. `visit_async_stmt` + # adds an `ASYNC` leaf then visits the child def/for/with statement + # nodes. Line yields from those nodes shouldn't treat the former + # `ASYNC` leaf as a complete line. 
+            return
+
+        complete_line = self.current_line
+        self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
+        yield complete_line
+
+    def visit_default(self, node: LN) -> Iterator[Line]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
+        if isinstance(node, Leaf):
+            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
+            for comment in generate_comments(node, mode=self.mode):
+                if any_open_brackets:
+                    # any comment within brackets is subject to splitting
+                    self.current_line.append(comment)
+                elif comment.type == token.COMMENT:
+                    # regular trailing comment
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+                else:
+                    # regular standalone comment
+                    yield from self.line()
+
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+            if any_open_brackets:
+                node.prefix = ""
+            if node.type not in WHITESPACE:
+                self.current_line.append(node)
+        yield from super().visit_default(node)
+
+    def visit_test(self, node: Node) -> Iterator[Line]:
+        """Visit an `x if y else z` test"""
+
+        already_parenthesized = (
+            node.prev_sibling and node.prev_sibling.type == token.LPAR
+        )
+
+        if not already_parenthesized:
+            # Similar to logic in wrap_in_parentheses
+            lpar = Leaf(token.LPAR, "")
+            rpar = Leaf(token.RPAR, "")
+            prefix = node.prefix
+            node.prefix = ""
+            lpar.prefix = prefix
+            node.insert_child(0, lpar)
+            node.append_child(rpar)
+
+        yield from self.visit_default(node)
+
+    def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
+        """Increase indentation level, maybe yield a line."""
+        # In blib2to3 INDENT never holds comments.
+        yield from self.line(+1)
+        yield from self.visit_default(node)
+
+    def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
+        """Decrease indentation level, maybe yield a line."""
+        # The current line might still wait for trailing comments. At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level. Get 'em.
+        yield from self.visit_default(node)
+
+        # Finally, emit the dedent.
+        yield from self.line(-1)
+
+    def visit_stmt(
+        self, node: Node, keywords: set[str], parens: set[str]
+    ) -> Iterator[Line]:
+        """Visit a statement.
+
+        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+        `def`, `with`, `class`, `assert`, and assignments.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This method puts those on a separate line.
+
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
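+
+        For example, `__post_init__` below binds roughly
+        `visit_if_stmt = partial(visit_stmt, keywords={"if", "else", "elif"},
+        parens={"if", "elif"})`, so every `if`/`elif`/`else` keyword starts a
+        fresh line and invisible parens may follow `if` and `elif`.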
+ """ + normalize_invisible_parens( + node, parens_after=parens, mode=self.mode, features=self.features + ) + for child in node.children: + if is_name_token(child) and child.value in keywords: + yield from self.line() + + yield from self.visit(child) + + def visit_typeparams(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[0].prefix = "" + + def visit_typevartuple(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + + def visit_paramspec(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + + def visit_dictsetmaker(self, node: Node) -> Iterator[Line]: + if Preview.wrap_long_dict_values_in_parens in self.mode: + for i, child in enumerate(node.children): + if i == 0: + continue + if node.children[i - 1].type == token.COLON: + if ( + child.type == syms.atom + and child.children[0].type in OPENING_BRACKETS + and not is_walrus_assignment(child) + ): + maybe_make_parens_invisible_in_atom( + child, + parent=node, + mode=self.mode, + features=self.features, + remove_brackets_around_comma=False, + ) + else: + wrap_in_parentheses(node, child, visible=False) + yield from self.visit_default(node) + + def visit_funcdef(self, node: Node) -> Iterator[Line]: + """Visit function definition.""" + yield from self.line() + + # Remove redundant brackets around return type annotation. + is_return_annotation = False + for child in node.children: + if child.type == token.RARROW: + is_return_annotation = True + elif is_return_annotation: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + mode=self.mode, + features=self.features, + remove_brackets_around_comma=False, + ): + wrap_in_parentheses(node, child, visible=False) + else: + wrap_in_parentheses(node, child, visible=False) + is_return_annotation = False + + for child in node.children: + yield from self.visit(child) + + def visit_match_case(self, node: Node) -> Iterator[Line]: + """Visit either a match or case statement.""" + normalize_invisible_parens( + node, parens_after=set(), mode=self.mode, features=self.features + ) + + yield from self.line() + for child in node.children: + yield from self.visit(child) + + def visit_suite(self, node: Node) -> Iterator[Line]: + """Visit a suite.""" + if is_stub_suite(node): + yield from self.visit(node.children[2]) + else: + yield from self.visit_default(node) + + def visit_simple_stmt(self, node: Node) -> Iterator[Line]: + """Visit a statement without nested statements.""" + prev_type: int | None = None + for child in node.children: + if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child): + wrap_in_parentheses(node, child, visible=False) + prev_type = child.type + + if node.parent and node.parent.type in STATEMENT: + if is_parent_function_or_class(node) and is_stub_body(node): + yield from self.visit_default(node) + else: + yield from self.line(+1) + yield from self.visit_default(node) + yield from self.line(-1) + + else: + if node.parent and is_stub_suite(node.parent): + node.prefix = "" + yield from self.visit_default(node) + return + yield from self.line() + yield from self.visit_default(node) + + def visit_async_stmt(self, node: Node) -> Iterator[Line]: + """Visit `async def`, `async for`, `async with`.""" + yield from self.line() + + children = iter(node.children) + for child in children: + yield from self.visit(child) + + if child.type == token.ASYNC or 
child.type == STANDALONE_COMMENT: + # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async + # line. + break + + internal_stmt = next(children) + yield from self.visit(internal_stmt) + + def visit_decorators(self, node: Node) -> Iterator[Line]: + """Visit decorators.""" + for child in node.children: + yield from self.line() + yield from self.visit(child) + + def visit_power(self, node: Node) -> Iterator[Line]: + for idx, leaf in enumerate(node.children[:-1]): + next_leaf = node.children[idx + 1] + + if not isinstance(leaf, Leaf): + continue + + value = leaf.value.lower() + if ( + leaf.type == token.NUMBER + and next_leaf.type == syms.trailer + # Ensure that we are in an attribute trailer + and next_leaf.children[0].type == token.DOT + # It shouldn't wrap hexadecimal, binary and octal literals + and not value.startswith(("0x", "0b", "0o")) + # It shouldn't wrap complex literals + and "j" not in value + ): + wrap_in_parentheses(node, leaf) + + remove_await_parens(node, mode=self.mode, features=self.features) + + yield from self.visit_default(node) + + def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: + """Remove a semicolon and put the other statement on a separate line.""" + yield from self.line() + + def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: + """End of file. Process outstanding comments and end with a newline.""" + yield from self.visit_default(leaf) + yield from self.line() + + def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: + any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() + if not any_open_brackets: + yield from self.line() + # STANDALONE_COMMENT nodes created by our special handling in + # normalize_fmt_off for comment-only blocks have fmt:off as the first + # line and fmt:on as the last line (each directive on its own line, + # not embedded in other text). These should be appended directly + # without calling visit_default, which would process their prefix and + # lose indentation. Normal STANDALONE_COMMENT nodes go through + # visit_default. 
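+        # For illustration, such a leaf's value may look like (hypothetical):
+        #     # fmt: off
+        #     # column-aligned    comment
+        #     # fmt: on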
+ value = leaf.value + lines = value.splitlines() + is_fmt_off_block = ( + len(lines) >= 2 + and contains_fmt_directive(lines[0], FMT_OFF) + and contains_fmt_directive(lines[-1], FMT_ON) + ) + if is_fmt_off_block: + # This is a fmt:off/on block from normalize_fmt_off - we still need + # to process any prefix comments (like markdown comments) but append + # the fmt block itself directly to preserve its formatting + + # Only process prefix comments if there actually is a prefix with comments + if leaf.prefix and any( + line.strip().startswith("#") + and not contains_fmt_directive(line.strip()) + for line in leaf.prefix.split("\n") + ): + for comment in generate_comments(leaf, mode=self.mode): + yield from self.line() + self.current_line.append(comment) + yield from self.line() + # Clear the prefix since we've processed it as comments above + leaf.prefix = "" + + self.current_line.append(leaf) + if not any_open_brackets: + yield from self.line() + else: + # Normal standalone comment - process through visit_default + yield from self.visit_default(leaf) + + def visit_factor(self, node: Node) -> Iterator[Line]: + """Force parentheses between a unary op and a binary power: + + -2 ** 8 -> -(2 ** 8) + """ + _operator, operand = node.children + if ( + operand.type == syms.power + and len(operand.children) == 3 + and operand.children[1].type == token.DOUBLESTAR + ): + lpar = Leaf(token.LPAR, "(") + rpar = Leaf(token.RPAR, ")") + index = operand.remove() or 0 + node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) + yield from self.visit_default(node) + + def visit_tname(self, node: Node) -> Iterator[Line]: + """ + Add potential parentheses around types in function parameter lists to be made + into real parentheses in case the type hint is too long to fit on a line + Examples: + def foo(a: int, b: float = 7): ... + + -> + + def foo(a: (int), b: (float) = 7): ... + """ + if len(node.children) == 3 and maybe_make_parens_invisible_in_atom( + node.children[2], parent=node, mode=self.mode, features=self.features + ): + wrap_in_parentheses(node, node.children[2], visible=False) + + yield from self.visit_default(node) + + def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + normalize_unicode_escape_sequences(leaf) + + if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value): + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. + if self.mode.string_normalization: + docstring = normalize_string_prefix(leaf.value) + # We handle string normalization at the end of this method, but since + # what we do right now acts differently depending on quote style (ex. + # see padding logic below), there's a possibility for unstable + # formatting. To avoid a situation where this function formats a + # docstring differently on the second pass, normalize it early. + docstring = normalize_string_quotes(docstring) + else: + docstring = leaf.value + prefix = get_string_prefix(docstring) + docstring = docstring[len(prefix) :] # Remove the prefix + quote_char = docstring[0] + # A natural way to remove the outer quotes is to do: + # docstring = docstring.strip(quote_char) + # but that breaks on """""x""" (which is '""x'). + # So we actually need to remove the first character and the next two + # characters but only if they are the same as the first. 
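+            # e.g. for '"""""x"""' the inner text is '""x' (quote_len 3),
+            # while for '"x"' it is 'x' (quote_len 1).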
+ quote_len = 1 if docstring[1] != quote_char else 3 + docstring = docstring[quote_len:-quote_len] + docstring_started_empty = not docstring + indent = " " * 4 * self.current_line.depth + + if is_multiline_string(leaf): + docstring = fix_multiline_docstring(docstring, indent) + else: + docstring = docstring.strip() + + has_trailing_backslash = False + if docstring: + # Add some padding if the docstring starts / ends with a quote mark. + if docstring[0] == quote_char: + docstring = " " + docstring + if docstring[-1] == quote_char: + docstring += " " + if docstring[-1] == "\\": + backslash_count = len(docstring) - len(docstring.rstrip("\\")) + if backslash_count % 2: + # Odd number of tailing backslashes, add some padding to + # avoid escaping the closing string quote. + docstring += " " + has_trailing_backslash = True + elif not docstring_started_empty: + docstring = " " + + # We could enforce triple quotes at this point. + quote = quote_char * quote_len + + # It's invalid to put closing single-character quotes on a new line. + if quote_len == 3: + # We need to find the length of the last line of the docstring + # to find if we can add the closing quotes to the line without + # exceeding the maximum line length. + # If docstring is one line, we don't put the closing quotes on a + # separate line because it looks ugly (#3320). + lines = docstring.splitlines() + last_line_length = len(lines[-1]) if docstring else 0 + + # If adding closing quotes would cause the last line to exceed + # the maximum line length, and the closing quote is not + # prefixed by a newline then put a line break before + # the closing quotes + if ( + len(lines) > 1 + and last_line_length + quote_len > self.mode.line_length + and len(indent) + quote_len <= self.mode.line_length + and not has_trailing_backslash + ): + if leaf.value[-1 - quote_len] == "\n": + leaf.value = prefix + quote + docstring + quote + else: + leaf.value = prefix + quote + docstring + "\n" + indent + quote + else: + leaf.value = prefix + quote + docstring + quote + else: + leaf.value = prefix + quote + docstring + quote + + if self.mode.string_normalization and leaf.type == token.STRING: + leaf.value = normalize_string_prefix(leaf.value) + leaf.value = normalize_string_quotes(leaf.value) + yield from self.visit_default(leaf) + + def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]: + normalize_numeric_literal(leaf) + yield from self.visit_default(leaf) + + def visit_atom(self, node: Node) -> Iterator[Line]: + """Visit any atom""" + if len(node.children) == 3: + first = node.children[0] + last = node.children[-1] + if (first.type == token.LSQB and last.type == token.RSQB) or ( + first.type == token.LBRACE and last.type == token.RBRACE + ): + # Lists or sets of one item + maybe_make_parens_invisible_in_atom( + node.children[1], + parent=node, + mode=self.mode, + features=self.features, + ) + + yield from self.visit_default(node) + + def visit_fstring(self, node: Node) -> Iterator[Line]: + # currently we don't want to format and split f-strings at all. + string_leaf = fstring_tstring_to_string(node) + node.replace(string_leaf) + if "\\" in string_leaf.value and any( + "\\" in str(child) + for child in node.children + if child.type == syms.fstring_replacement_field + ): + # string normalization doesn't account for nested quotes, + # causing breakages. 
skip normalization when nested quotes exist + yield from self.visit_default(string_leaf) + return + yield from self.visit_STRING(string_leaf) + + def visit_tstring(self, node: Node) -> Iterator[Line]: + # currently we don't want to format and split t-strings at all. + string_leaf = fstring_tstring_to_string(node) + node.replace(string_leaf) + if "\\" in string_leaf.value and any( + "\\" in str(child) + for child in node.children + if child.type == syms.fstring_replacement_field + ): + # string normalization doesn't account for nested quotes, + # causing breakages. skip normalization when nested quotes exist + yield from self.visit_default(string_leaf) + return + yield from self.visit_STRING(string_leaf) + + # TODO: Uncomment Implementation to format f-string children + # fstring_start = node.children[0] + # fstring_end = node.children[-1] + # assert isinstance(fstring_start, Leaf) + # assert isinstance(fstring_end, Leaf) + + # quote_char = fstring_end.value[0] + # quote_idx = fstring_start.value.index(quote_char) + # prefix, quote = ( + # fstring_start.value[:quote_idx], + # fstring_start.value[quote_idx:] + # ) + + # if not is_docstring(node, self.mode): + # prefix = normalize_string_prefix(prefix) + + # assert quote == fstring_end.value + + # is_raw_fstring = "r" in prefix or "R" in prefix + # middles = [ + # leaf + # for leaf in node.leaves() + # if leaf.type == token.FSTRING_MIDDLE + # ] + + # if self.mode.string_normalization: + # middles, quote = normalize_fstring_quotes(quote, middles, is_raw_fstring) + + # fstring_start.value = prefix + quote + # fstring_end.value = quote + + # yield from self.visit_default(node) + + def visit_comp_for(self, node: Node) -> Iterator[Line]: + if Preview.wrap_comprehension_in in self.mode: + normalize_invisible_parens( + node, parens_after={"in"}, mode=self.mode, features=self.features + ) + yield from self.visit_default(node) + + def visit_old_comp_for(self, node: Node) -> Iterator[Line]: + yield from self.visit_comp_for(node) + + def __post_init__(self) -> None: + """You are in a twisty little maze of passages.""" + self.current_line = Line(mode=self.mode) + + v = self.visit_stmt + Ø: set[str] = set() + self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) + self.visit_if_stmt = partial( + v, keywords={"if", "else", "elif"}, parens={"if", "elif"} + ) + self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) + self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) + self.visit_try_stmt = partial( + v, keywords={"try", "except", "else", "finally"}, parens=Ø + ) + self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"}) + self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"}) + self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) + + self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) + self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) + self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) + self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) + self.visit_async_funcdef = self.visit_async_stmt + self.visit_decorated = self.visit_decorators + + # PEP 634 + self.visit_match_stmt = self.visit_match_case + self.visit_case_block = self.visit_match_case + self.visit_guard = partial(v, keywords=Ø, parens={"if"}) + + +def _hugging_power_ops_line_to_string( + line: Line, + features: Collection[Feature], + mode: Mode, +) -> str | None: + try: + return 
line_to_string(next(hug_power_op(line, features, mode)))
+    except CannotTransform:
+        return None
+
+
+def transform_line(
+    line: Line, mode: Mode, features: Collection[Feature] = ()
+) -> Iterator[Line]:
+    """Transform a `line`, potentially splitting it into many lines.
+
+    They should fit in the allotted `line_length` but might not be able to.
+
+    `features` are syntactical features that may be used in the output.
+    """
+    if line.is_comment:
+        yield line
+        return
+
+    line_str = line_to_string(line)
+
+    # We need the line string when power operators are hugging to determine if we
+    # should split the line. Default to line_str if no power operators are present
+    # on the line.
+    line_str_hugging_power_ops = (
+        _hugging_power_ops_line_to_string(line, features, mode) or line_str
+    )
+
+    ll = mode.line_length
+    sn = mode.string_normalization
+    string_merge = StringMerger(ll, sn)
+    string_paren_strip = StringParenStripper(ll, sn)
+    string_split = StringSplitter(ll, sn)
+    string_paren_wrap = StringParenWrapper(ll, sn)
+
+    transformers: list[Transformer]
+    if (
+        not line.contains_uncollapsable_type_comments()
+        and not line.should_split_rhs
+        and not line.magic_trailing_comma
+        and (
+            is_line_short_enough(line, mode=mode, line_str=line_str_hugging_power_ops)
+            or line.contains_unsplittable_type_ignore()
+        )
+        and not (line.inside_brackets and line.contains_standalone_comments())
+        and not line.contains_implicit_multiline_string_with_comments()
+    ):
+        # Only apply basic string preprocessing, since lines shouldn't be split here.
+        if Preview.string_processing in mode:
+            transformers = [string_merge, string_paren_strip]
+        else:
+            transformers = []
+    elif line.is_def and not should_split_funcdef_with_rhs(line, mode):
+        transformers = [left_hand_split]
+    else:
+
+        def _rhs(
+            self: object, line: Line, features: Collection[Feature], mode: Mode
+        ) -> Iterator[Line]:
+            """Wraps calls to `right_hand_split`.
+
+            The calls increasingly `omit` right-hand trailers (bracket pairs with
+            content), meaning the trailers get glued together to split on another
+            bracket pair instead.
+            """
+            for omit in generate_trailers_to_omit(line, mode.line_length):
+                lines = list(right_hand_split(line, mode, features, omit=omit))
+                # Note: this check is only able to figure out if the first line of the
+                # *current* transformation fits in the line length. This is true only
+                # for simple cases. All others require running more transforms via
+                # `transform_line()`. This check doesn't know if those would succeed.
+                if is_line_short_enough(lines[0], mode=mode):
+                    yield from lines
+                    return
+
+            # All splits failed, best effort split with no omits.
+            # This mostly happens to multiline strings that are by definition
+            # reported as not fitting a single line, as well as lines that contain
+            # trailing commas (those have to be exploded).
+            yield from right_hand_split(line, mode, features=features)
+
+        # HACK: nested functions (like _rhs) compiled by mypyc don't retain their
+        # __name__ attribute which is needed in `run_transformer` further down.
+        # Unfortunately a nested class breaks mypyc too. So a class must be created
+        # via type ... 
https://github.com/mypyc/mypyc/issues/884 + rhs = type("rhs", (), {"__call__": _rhs})() + + if Preview.string_processing in mode: + if line.inside_brackets: + transformers = [ + string_merge, + string_paren_strip, + string_split, + delimiter_split, + standalone_comment_split, + string_paren_wrap, + rhs, + ] + else: + transformers = [ + string_merge, + string_paren_strip, + string_split, + string_paren_wrap, + rhs, + ] + else: + if line.inside_brackets: + transformers = [delimiter_split, standalone_comment_split, rhs] + else: + transformers = [rhs] + # It's always safe to attempt hugging of power operations and pretty much every line + # could match. + transformers.append(hug_power_op) + + for transform in transformers: + # We are accumulating lines in `result` because we might want to abort + # mission and return the original line in the end, or attempt a different + # split altogether. + try: + result = run_transformer(line, transform, mode, features, line_str=line_str) + except CannotTransform: + continue + else: + yield from result + break + + else: + yield line + + +def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool: + """If a funcdef has a magic trailing comma in the return type, then we should first + split the line with rhs to respect the comma. + """ + return_type_leaves: list[Leaf] = [] + in_return_type = False + + for leaf in line.leaves: + if leaf.type == token.COLON: + in_return_type = False + if in_return_type: + return_type_leaves.append(leaf) + if leaf.type == token.RARROW: + in_return_type = True + + # using `bracket_split_build_line` will mess with whitespace, so we duplicate a + # couple lines from it. + result = Line(mode=line.mode, depth=line.depth) + leaves_to_track = get_leaves_inside_matching_brackets(return_type_leaves) + for leaf in return_type_leaves: + result.append( + leaf, + preformatted=True, + track_bracket=id(leaf) in leaves_to_track, + ) + + # we could also return true if the line is too long, and the return type is longer + # than the param list. Or if `should_split_rhs` returns True. + return result.magic_trailing_comma is not None + + +class _BracketSplitComponent(Enum): + head = auto() + body = auto() + tail = auto() + + +def left_hand_split( + line: Line, _features: Collection[Feature], mode: Mode +) -> Iterator[Line]: + """Split line into many lines, starting with the first matching bracket pair. + + Note: this usually looks weird, only use this for function definitions. + Prefer RHS otherwise. This is why this function is not symmetrical with + :func:`right_hand_split` which also handles optional parentheses. 
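+
+    Sketch (actual output depends on mode and line length): a too-long
+    `def f(a, b) -> int:` header splits into a `def f(` head, an `a, b` body
+    one indent deeper, and a `) -> int:` tail.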
+ """ + for leaf_type in [token.LPAR, token.LSQB]: + tail_leaves: list[Leaf] = [] + body_leaves: list[Leaf] = [] + head_leaves: list[Leaf] = [] + current_leaves = head_leaves + matching_bracket: Leaf | None = None + depth = 0 + for index, leaf in enumerate(line.leaves): + if index == 2 and leaf.type == token.LSQB: + # A [ at index 2 means this is a type param, so start + # tracking the depth + depth += 1 + elif depth > 0: + if leaf.type == token.LSQB: + depth += 1 + elif leaf.type == token.RSQB: + depth -= 1 + if ( + current_leaves is body_leaves + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket is matching_bracket + and isinstance(matching_bracket, Leaf) + # If the code is still on LPAR and we are inside a type + # param, ignore the match since this is searching + # for the function arguments + and not (leaf_type == token.LPAR and depth > 0) + ): + ensure_visible(leaf) + ensure_visible(matching_bracket) + current_leaves = tail_leaves if body_leaves else head_leaves + current_leaves.append(leaf) + if current_leaves is head_leaves: + if leaf.type == leaf_type and ( + Preview.fix_type_expansion_split not in mode + or not (leaf_type == token.LPAR and depth > 0) + ): + matching_bracket = leaf + current_leaves = body_leaves + if matching_bracket and tail_leaves: + break + if not matching_bracket or not tail_leaves: + raise CannotSplit("No brackets found") + + head = bracket_split_build_line( + head_leaves, line, matching_bracket, component=_BracketSplitComponent.head + ) + body = bracket_split_build_line( + body_leaves, line, matching_bracket, component=_BracketSplitComponent.body + ) + tail = bracket_split_build_line( + tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail + ) + bracket_split_succeeded_or_raise(head, body, tail) + for result in (head, body, tail): + if result: + yield result + + +def right_hand_split( + line: Line, + mode: Mode, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + """Split line into many lines, starting with the last matching bracket pair. + + If the split was by optional parentheses, attempt splitting without them, too. + `omit` is a collection of closing bracket IDs that shouldn't be considered for + this split. + + Note: running this function modifies `bracket_depth` on the leaves of `line`. + """ + rhs_result = _first_right_hand_split(line, omit=omit) + yield from _maybe_split_omitting_optional_parens( + rhs_result, line, mode, features=features, omit=omit + ) + + +def _first_right_hand_split( + line: Line, + omit: Collection[LeafID] = (), +) -> RHSResult: + """Split the line into head, body, tail starting with the last bracket pair. + + Note: this function should not have side effects. It's relied upon by + _maybe_split_omitting_optional_parens to get an opinion whether to prefer + splitting on the right side of an assignment statement. 
+ """ + tail_leaves: list[Leaf] = [] + body_leaves: list[Leaf] = [] + head_leaves: list[Leaf] = [] + current_leaves = tail_leaves + opening_bracket: Leaf | None = None + closing_bracket: Leaf | None = None + for leaf in reversed(line.leaves): + if current_leaves is body_leaves: + if leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves + current_leaves.append(leaf) + if current_leaves is tail_leaves: + if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves + if not (opening_bracket and closing_bracket and head_leaves): + # If there is no opening or closing_bracket that means the split failed and + # all content is in the tail. Otherwise, if `head_leaves` are empty, it means + # the matching `opening_bracket` wasn't available on `line` anymore. + raise CannotSplit("No brackets found") + + tail_leaves.reverse() + body_leaves.reverse() + head_leaves.reverse() + + body: Line | None = None + if ( + Preview.hug_parens_with_braces_and_square_brackets in line.mode + and tail_leaves[0].value + and tail_leaves[0].opening_bracket is head_leaves[-1] + ): + inner_body_leaves = list(body_leaves) + hugged_opening_leaves: list[Leaf] = [] + hugged_closing_leaves: list[Leaf] = [] + is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR] + unpacking_offset: int = 1 if is_unpacking else 0 + while ( + len(inner_body_leaves) >= 2 + unpacking_offset + and inner_body_leaves[-1].type in CLOSING_BRACKETS + and inner_body_leaves[-1].opening_bracket + is inner_body_leaves[unpacking_offset] + ): + if unpacking_offset: + hugged_opening_leaves.append(inner_body_leaves.pop(0)) + unpacking_offset = 0 + hugged_opening_leaves.append(inner_body_leaves.pop(0)) + hugged_closing_leaves.insert(0, inner_body_leaves.pop()) + + if hugged_opening_leaves and inner_body_leaves: + inner_body = bracket_split_build_line( + inner_body_leaves, + line, + hugged_opening_leaves[-1], + component=_BracketSplitComponent.body, + ) + if ( + line.mode.magic_trailing_comma + and inner_body_leaves[-1].type == token.COMMA + ): + should_hug = True + else: + line_length = line.mode.line_length - sum( + len(str(leaf)) + for leaf in hugged_opening_leaves + hugged_closing_leaves + ) + if is_line_short_enough( + inner_body, mode=replace(line.mode, line_length=line_length) + ): + # Do not hug if it fits on a single line. + should_hug = False + else: + should_hug = True + if should_hug: + body_leaves = inner_body_leaves + head_leaves.extend(hugged_opening_leaves) + tail_leaves = hugged_closing_leaves + tail_leaves + body = inner_body # No need to re-calculate the body again later. 
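+                # Hugging keeps e.g. the `([` and `])` of `foo([1, 2, 3])`
+                # glued together instead of splitting each bracket onto its
+                # own line (illustrative; only under the preview style above).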
+
+    head = bracket_split_build_line(
+        head_leaves, line, opening_bracket, component=_BracketSplitComponent.head
+    )
+    if body is None:
+        body = bracket_split_build_line(
+            body_leaves, line, opening_bracket, component=_BracketSplitComponent.body
+        )
+    tail = bracket_split_build_line(
+        tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail
+    )
+    bracket_split_succeeded_or_raise(head, body, tail)
+    return RHSResult(head, body, tail, opening_bracket, closing_bracket)
+
+
+def _maybe_split_omitting_optional_parens(
+    rhs: RHSResult,
+    line: Line,
+    mode: Mode,
+    features: Collection[Feature] = (),
+    omit: Collection[LeafID] = (),
+) -> Iterator[Line]:
+    if (
+        Feature.FORCE_OPTIONAL_PARENTHESES not in features
+        # the opening bracket is an optional paren
+        and rhs.opening_bracket.type == token.LPAR
+        and not rhs.opening_bracket.value
+        # the closing bracket is an optional paren
+        and rhs.closing_bracket.type == token.RPAR
+        and not rhs.closing_bracket.value
+        # it's not an import (optional parens are the only thing we can split on
+        # in this case; attempting a split without them is a waste of time)
+        and not line.is_import
+        # and we can actually remove the parens
+        and can_omit_invisible_parens(rhs, mode.line_length)
+    ):
+        omit = {id(rhs.closing_bracket), *omit}
+        try:
+            # The RHSResult omitting the optional parens.
+            rhs_oop = _first_right_hand_split(line, omit=omit)
+            if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode):
+                yield from _maybe_split_omitting_optional_parens(
+                    rhs_oop, line, mode, features=features, omit=omit
+                )
+                return
+
+        except CannotSplit as e:
+            # For chained assignments we want to use the previous successful split
+            if line.is_chained_assignment:
+                pass
+
+            elif (
+                not can_be_split(rhs.body)
+                and not is_line_short_enough(rhs.body, mode=mode)
+                and not (
+                    Preview.wrap_long_dict_values_in_parens in mode
+                    and rhs.opening_bracket.parent
+                    and rhs.opening_bracket.parent.parent
+                    and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
+                )
+            ):
+                raise CannotSplit(
+                    "Splitting failed, body is still too long and can't be split."
+                ) from e
+
+            elif (
+                rhs.head.contains_multiline_strings()
+                or rhs.tail.contains_multiline_strings()
+            ):
+                raise CannotSplit(
+                    "The current optional pair of parentheses is bound to fail to"
+                    " satisfy the splitting algorithm because the head or the tail"
+                    " contains multiline strings which by definition never fit one"
+                    " line."
+                ) from e
+
+    ensure_visible(rhs.opening_bracket)
+    ensure_visible(rhs.closing_bracket)
+    for result in (rhs.head, rhs.body, rhs.tail):
+        if result:
+            yield result
+
+
+def _prefer_split_rhs_oop_over_rhs(
+    rhs_oop: RHSResult, rhs: RHSResult, mode: Mode
+) -> bool:
+    """
+    Returns whether we should prefer the result from a split omitting optional parens
+    (rhs_oop) over the original (rhs).
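+
+    For instance, for `e = f(g(x))` the `rhs` candidate splits on the invisible
+    parens around the whole right-hand side, while `rhs_oop` omits them and
+    splits on `f(...)` instead; the heuristics below pick between the two
+    (an illustrative description, not an exhaustive specification).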
+ """ + # contains unsplittable type ignore + if ( + rhs_oop.head.contains_unsplittable_type_ignore() + or rhs_oop.body.contains_unsplittable_type_ignore() + or rhs_oop.tail.contains_unsplittable_type_ignore() + ): + return True + + # Retain optional parens around dictionary values + if ( + Preview.wrap_long_dict_values_in_parens + and rhs.opening_bracket.parent + and rhs.opening_bracket.parent.parent + and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker + and rhs.body.bracket_tracker.delimiters + ): + # Unless the split is inside the key + return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves) + + # the split is right after `=` + if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL): + return True + + # the left side of assignment contains brackets + if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]): + return True + + # the left side of assignment is short enough (the -1 is for the ending optional + # paren) + if not is_line_short_enough( + rhs.head, mode=replace(mode, line_length=mode.line_length - 1) + ): + return True + + # the left side of assignment won't explode further because of magic trailing comma + if rhs.head.magic_trailing_comma is not None: + return True + + # If we have multiple targets, we prefer more `=`s on the head vs pushing them to + # the body + rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL) + rhs_oop_head_equal_count = [leaf.type for leaf in rhs_oop.head.leaves].count( + token.EQUAL + ) + if rhs_head_equal_count > 1 and rhs_head_equal_count > rhs_oop_head_equal_count: + return False + + has_closing_bracket_after_assign = False + for leaf in reversed(rhs_oop.head.leaves): + if leaf.type == token.EQUAL: + break + if leaf.type in CLOSING_BRACKETS: + has_closing_bracket_after_assign = True + break + return ( + # contains matching brackets after the `=` (done by checking there is a + # closing bracket) + has_closing_bracket_after_assign + or ( + # the split is actually from inside the optional parens (done by checking + # the first line still contains the `=`) + any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves) + # the first line is short enough + and is_line_short_enough(rhs_oop.head, mode=mode) + ) + ) + + +def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: + """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. + + Do nothing otherwise. + + A left- or right-hand split is based on a pair of brackets. Content before + (and including) the opening bracket is left on one line, content inside the + brackets is put on a separate line, and finally content starting with and + following the closing bracket is put on a separate line. + + Those are called `head`, `body`, and `tail`, respectively. If the split + produced the same line (all content in `head`) or ended up with an empty `body` + and the `tail` is just the closing bracket, then it's considered failed. 
+ """ + tail_len = len(str(tail).strip()) + if not body: + if tail_len == 0: + raise CannotSplit("Splitting brackets produced the same line") + + elif tail_len < 3: + raise CannotSplit( + f"Splitting brackets on an empty body to save {tail_len} characters is" + " not worth it" + ) + + +def _ensure_trailing_comma( + leaves: list[Leaf], original: Line, opening_bracket: Leaf +) -> bool: + if not leaves: + return False + # Ensure a trailing comma for imports + if original.is_import: + return True + # ...and standalone function arguments + if not original.is_def: + return False + if opening_bracket.value != "(": + return False + # Don't add commas if we already have any commas + if any( + leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves + ): + return False + + # Find a leaf with a parent (comments don't have parents) + leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None) + if leaf_with_parent is None: + return True + # Don't add commas inside parenthesized return annotations + if get_annotation_type(leaf_with_parent) == "return": + return False + # Don't add commas inside PEP 604 unions + if ( + leaf_with_parent.parent + and leaf_with_parent.parent.next_sibling + and leaf_with_parent.parent.next_sibling.type == token.VBAR + ): + return False + return True + + +def bracket_split_build_line( + leaves: list[Leaf], + original: Line, + opening_bracket: Leaf, + *, + component: _BracketSplitComponent, +) -> Line: + """Return a new line with given `leaves` and respective comments from `original`. + + If it's the head component, brackets will be tracked so trailing commas are + respected. + + If it's the body component, the result line is one-indented inside brackets and as + such has its first leaf's prefix normalized and a trailing comma added when + expected. + """ + result = Line(mode=original.mode, depth=original.depth) + if component is _BracketSplitComponent.body: + result.inside_brackets = True + result.depth += 1 + if _ensure_trailing_comma(leaves, original, opening_bracket): + for i in range(len(leaves) - 1, -1, -1): + if leaves[i].type == STANDALONE_COMMENT: + continue + + if leaves[i].type != token.COMMA: + new_comma = Leaf(token.COMMA, ",") + leaves.insert(i + 1, new_comma) + break + + leaves_to_track: set[LeafID] = set() + if component is _BracketSplitComponent.head: + leaves_to_track = get_leaves_inside_matching_brackets(leaves) + # Populate the line + for leaf in leaves: + result.append( + leaf, + preformatted=True, + track_bracket=id(leaf) in leaves_to_track, + ) + for comment_after in original.comments_after(leaf): + result.append(comment_after, preformatted=True) + if component is _BracketSplitComponent.body and should_split_line( + result, opening_bracket + ): + result.should_split_rhs = True + return result + + +def dont_increase_indentation(split_func: Transformer) -> Transformer: + """Normalize prefix of the first leaf in every line returned by `split_func`. + + This is a decorator over relevant split functions. 
+ """ + + @wraps(split_func) + def split_wrapper( + line: Line, features: Collection[Feature], mode: Mode + ) -> Iterator[Line]: + for split_line in split_func(line, features, mode): + split_line.leaves[0].prefix = "" + yield split_line + + return split_wrapper + + +def _get_last_non_comment_leaf(line: Line) -> int | None: + for leaf_idx in range(len(line.leaves) - 1, 0, -1): + if line.leaves[leaf_idx].type != STANDALONE_COMMENT: + return leaf_idx + return None + + +def _can_add_trailing_comma(leaf: Leaf, features: Collection[Feature]) -> bool: + if is_vararg(leaf, within={syms.typedargslist}): + return Feature.TRAILING_COMMA_IN_DEF in features + if is_vararg(leaf, within={syms.arglist, syms.argument}): + return Feature.TRAILING_COMMA_IN_CALL in features + return True + + +def _safe_add_trailing_comma(safe: bool, delimiter_priority: int, line: Line) -> Line: + if ( + safe + and delimiter_priority == COMMA_PRIORITY + and line.leaves[-1].type != token.COMMA + and line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + line.append(new_comma) + return line + + +MIGRATE_COMMENT_DELIMITERS = {STRING_PRIORITY, COMMA_PRIORITY} + + +@dont_increase_indentation +def delimiter_split( + line: Line, features: Collection[Feature], mode: Mode +) -> Iterator[Line]: + """Split according to delimiters of the highest priority. + + If the appropriate Features are given, the split will add trailing commas + also in function signatures and calls that contain `*` and `**`. + """ + if len(line.leaves) == 0: + raise CannotSplit("Line empty") from None + last_leaf = line.leaves[-1] + + bt = line.bracket_tracker + try: + delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) + except ValueError: + raise CannotSplit("No delimiters found") from None + + if ( + delimiter_priority == DOT_PRIORITY + and bt.delimiter_count_with_priority(delimiter_priority) == 1 + ): + raise CannotSplit("Splitting a single attribute from its owner looks wrong") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + lowest_depth = sys.maxsize + trailing_comma_safe = True + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + def append_comments(leaf: Leaf) -> Iterator[Line]: + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + last_non_comment_leaf = _get_last_non_comment_leaf(line) + for leaf_idx, leaf in enumerate(line.leaves): + yield from append_to_line(leaf) + + previous_priority = leaf_idx > 0 and bt.delimiters.get( + id(line.leaves[leaf_idx - 1]) + ) + if ( + previous_priority != delimiter_priority + or delimiter_priority in MIGRATE_COMMENT_DELIMITERS + ): + yield from append_comments(leaf) + + lowest_depth = min(lowest_depth, leaf.bracket_depth) + if trailing_comma_safe and leaf.bracket_depth == lowest_depth: + trailing_comma_safe = _can_add_trailing_comma(leaf, features) + + if last_leaf.type == STANDALONE_COMMENT and leaf_idx == last_non_comment_leaf: + current_line = _safe_add_trailing_comma( + trailing_comma_safe, delimiter_priority, current_line + ) + + leaf_priority = bt.delimiters.get(id(leaf)) + if leaf_priority == delimiter_priority: + if ( + leaf_idx + 1 < 
len(line.leaves) + and delimiter_priority not in MIGRATE_COMMENT_DELIMITERS + ): + yield from append_comments(line.leaves[leaf_idx + 1]) + + yield current_line + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + if current_line: + current_line = _safe_add_trailing_comma( + trailing_comma_safe, delimiter_priority, current_line + ) + yield current_line + + +@dont_increase_indentation +def standalone_comment_split( + line: Line, features: Collection[Feature], mode: Mode +) -> Iterator[Line]: + """Split standalone comments from the rest of the line.""" + if not line.contains_standalone_comments(): + raise CannotSplit("Line does not have any standalone comments") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + if current_line: + yield current_line + + +def normalize_invisible_parens( + node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature] +) -> None: + """Make existing optional parentheses invisible or create new ones. + + `parens_after` is a set of string leaf values immediately after which parens + should be put. + + Standardizes on visible parentheses for single-element tuples, and keeps + existing visible parentheses for other tuples and generator expressions. + """ + for pc in list_comments(node.prefix, is_endmarker=False, mode=mode): + if contains_fmt_directive(pc.value, FMT_OFF): + # This `node` has a prefix with `# fmt: off`, don't mess with parens. + return + + # The multiple context managers grammar has a different pattern, thus this is + # separate from the for-loop below. This possibly wraps them in invisible parens, + # and later will be removed in remove_with_parens when needed. + if node.type == syms.with_stmt: + _maybe_wrap_cms_in_parens(node, mode, features) + + check_lpar = False + for index, child in enumerate(list(node.children)): + # Fixes a bug where invisible parens are not properly stripped from + # assignment statements that contain type annotations. + if isinstance(child, Node) and child.type == syms.annassign: + normalize_invisible_parens( + child, parens_after=parens_after, mode=mode, features=features + ) + + # Fixes a bug where invisible parens are not properly wrapped around + # case blocks. + if isinstance(child, Node) and child.type == syms.case_block: + normalize_invisible_parens( + child, parens_after={"case"}, mode=mode, features=features + ) + + # Add parentheses around if guards in case blocks + if isinstance(child, Node) and child.type == syms.guard: + normalize_invisible_parens( + child, parens_after={"if"}, mode=mode, features=features + ) + + # Add parentheses around long tuple unpacking in assignments. 
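+ # For example, a long unpacking target such as
+ #     first_item, second_item, third_item = produce_items()
+ # parses as a testlist_star_expr at index 0; flagging check_lpar here
+ # lets the branches below wrap it in (invisible) parentheses so the
+ # assignment can be split across lines when it exceeds the line length.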
+ if (
+ index == 0
+ and isinstance(child, Node)
+ and child.type == syms.testlist_star_expr
+ ):
+ check_lpar = True
+
+ # Check for assignment LHS with preview feature enabled
+ if (
+ Preview.remove_parens_from_assignment_lhs in mode
+ and index == 0
+ and isinstance(child, Node)
+ and child.type == syms.atom
+ and node.type == syms.expr_stmt
+ and not _atom_has_magic_trailing_comma(child, mode)
+ and not _is_atom_multiline(child)
+ ):
+ if maybe_make_parens_invisible_in_atom(
+ child,
+ parent=node,
+ mode=mode,
+ features=features,
+ remove_brackets_around_comma=True,
+ allow_star_expr=True,
+ ):
+ wrap_in_parentheses(node, child, visible=False)
+
+ if check_lpar:
+ if (
+ child.type == syms.atom
+ and node.type == syms.for_stmt
+ and isinstance(child.prev_sibling, Leaf)
+ and child.prev_sibling.type == token.NAME
+ and child.prev_sibling.value == "for"
+ ):
+ if maybe_make_parens_invisible_in_atom(
+ child,
+ parent=node,
+ mode=mode,
+ features=features,
+ remove_brackets_around_comma=True,
+ ):
+ wrap_in_parentheses(node, child, visible=False)
+ elif isinstance(child, Node) and node.type == syms.with_stmt:
+ remove_with_parens(child, node, mode=mode, features=features)
+ elif child.type == syms.atom and not (
+ "in" in parens_after
+ and len(child.children) == 3
+ and is_lpar_token(child.children[0])
+ and is_rpar_token(child.children[-1])
+ and child.children[1].type == syms.test
+ ):
+ if maybe_make_parens_invisible_in_atom(
+ child, parent=node, mode=mode, features=features
+ ):
+ wrap_in_parentheses(node, child, visible=False)
+ elif is_one_tuple(child):
+ wrap_in_parentheses(node, child, visible=True)
+ elif node.type == syms.import_from:
+ _normalize_import_from(node, child, index)
+ break
+ elif (
+ index == 1
+ and child.type == token.STAR
+ and node.type == syms.except_clause
+ ):
+ # In except* (PEP 654), the star is actually part of
+ # the keyword. So we need to skip the insertion of
+ # invisible parentheses to work more precisely.
+ continue
+
+ elif (
+ isinstance(child, Leaf)
+ and child.next_sibling is not None
+ and child.next_sibling.type == token.COLON
+ and child.value == "case"
+ ):
+ # A special patch for the "case case:" scenario; the second
+ # occurrence of case will not be parsed as a Python keyword.
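+ # For example:
+ #     match command:
+ #         case case:
+ #             ...
+ # Here the second `case` is a capture pattern (a plain NAME), not the
+ # soft keyword, so paren normalization must stop at this child.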
+ break + + elif not is_multiline_string(child): + wrap_in_parentheses(node, child, visible=False) + + comma_check = child.type == token.COMMA + + check_lpar = isinstance(child, Leaf) and ( + child.value in parens_after or comma_check + ) + + +def _normalize_import_from(parent: Node, child: LN, index: int) -> None: + # "import from" nodes store parentheses directly as part of + # the statement + if is_lpar_token(child): + assert is_rpar_token(parent.children[-1]) + # make parentheses invisible + child.value = "" + parent.children[-1].value = "" + elif child.type != token.STAR: + # insert invisible parentheses + parent.insert_child(index, Leaf(token.LPAR, "")) + parent.append_child(Leaf(token.RPAR, "")) + + +def remove_await_parens(node: Node, mode: Mode, features: Collection[Feature]) -> None: + if node.children[0].type == token.AWAIT and len(node.children) > 1: + if ( + node.children[1].type == syms.atom + and node.children[1].children[0].type == token.LPAR + ): + if maybe_make_parens_invisible_in_atom( + node.children[1], + parent=node, + mode=mode, + features=features, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[1], visible=False) + + # Since await is an expression we shouldn't remove + # brackets in cases where this would change + # the AST due to operator precedence. + # Therefore we only aim to remove brackets around + # power nodes that aren't also await expressions themselves. + # https://peps.python.org/pep-0492/#updated-operator-precedence-table + # N.B. We've still removed any redundant nested brackets though :) + opening_bracket = cast(Leaf, node.children[1].children[0]) + closing_bracket = cast(Leaf, node.children[1].children[-1]) + bracket_contents = node.children[1].children[1] + if isinstance(bracket_contents, Node) and ( + bracket_contents.type != syms.power + or bracket_contents.children[0].type == token.AWAIT + or any( + isinstance(child, Leaf) and child.type == token.DOUBLESTAR + for child in bracket_contents.children + ) + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + + +def _maybe_wrap_cms_in_parens( + node: Node, mode: Mode, features: Collection[Feature] +) -> None: + """When enabled and safe, wrap the multiple context managers in invisible parens. + + It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS. + """ + if ( + Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features + or len(node.children) <= 2 + # If it's an atom, it's already wrapped in parens. + or node.children[1].type == syms.atom + ): + return + colon_index: int | None = None + for i in range(2, len(node.children)): + if node.children[i].type == token.COLON: + colon_index = i + break + if colon_index is not None: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + context_managers = node.children[1:colon_index] + for child in context_managers: + child.remove() + # After wrapping, the with_stmt will look like this: + # with_stmt + # NAME 'with' + # atom + # LPAR '' + # testlist_gexp + # ... 
<-- context_managers + # /testlist_gexp + # RPAR '' + # /atom + # COLON ':' + new_child = Node( + syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar] + ) + node.insert_child(1, new_child) + + +def remove_with_parens( + node: Node, parent: Node, mode: Mode, features: Collection[Feature] +) -> None: + """Recursively hide optional parens in `with` statements.""" + # Removing all unnecessary parentheses in with statements in one pass is a tad + # complex as different variations of bracketed statements result in pretty + # different parse trees: + # + # with (open("file")) as f: # this is an asexpr_test + # ... + # + # with (open("file") as f): # this is an atom containing an + # ... # asexpr_test + # + # with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA, + # ... # asexpr_test + # + # with (open("file") as f, open("file") as f): # an atom containing a + # ... # testlist_gexp which then + # # contains multiple asexpr_test(s) + if node.type == syms.atom: + if maybe_make_parens_invisible_in_atom( + node, + parent=parent, + mode=mode, + features=features, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(parent, node, visible=False) + if isinstance(node.children[1], Node): + remove_with_parens(node.children[1], node, mode=mode, features=features) + elif node.type == syms.testlist_gexp: + for child in node.children: + if isinstance(child, Node): + remove_with_parens(child, node, mode=mode, features=features) + elif node.type == syms.asexpr_test and not any( + leaf.type == token.COLONEQUAL for leaf in node.leaves() + ): + if maybe_make_parens_invisible_in_atom( + node.children[0], + parent=node, + mode=mode, + features=features, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[0], visible=False) + + +def _atom_has_magic_trailing_comma(node: LN, mode: Mode) -> bool: + """Check if an atom node has a magic trailing comma. + + Returns True for single-element tuples with trailing commas like (a,), + which should be preserved to maintain their tuple type. + """ + if not mode.magic_trailing_comma: + return False + + return is_one_tuple(node) + + +def _is_atom_multiline(node: LN) -> bool: + """Check if an atom node is multiline (indicating intentional formatting).""" + if not isinstance(node, Node) or len(node.children) < 3: + return False + + # Check the middle child (between LPAR and RPAR) for newlines in its subtree + # The first child's prefix contains blank lines/comments before the opening paren + middle = node.children[1] + for child in middle.pre_order(): + if isinstance(child, Leaf) and "\n" in child.prefix: + return True + + return False + + +def maybe_make_parens_invisible_in_atom( + node: LN, + parent: LN, + mode: Mode, + features: Collection[Feature], + remove_brackets_around_comma: bool = False, + allow_star_expr: bool = False, +) -> bool: + """If it's safe, make the parens in the atom `node` invisible, recursively. + Additionally, remove repeated, adjacent invisible parens from the atom `node` + as they are redundant. + + Returns whether the node should itself be wrapped in invisible parentheses. 
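+
+ For example, the parens in `return (x)` can both be blanked out, while
+ an empty tuple `()`, a one-tuple `(x,)`, or a generator expression
+ `(i for i in xs)` must keep visible parentheses, so the checks below
+ bail out early for those shapes.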
+ """ + if ( + node.type not in (syms.atom, syms.expr) + or is_empty_tuple(node) + or is_one_tuple(node) + or (is_tuple(node) and parent.type == syms.asexpr_test) + or ( + is_tuple(node) + and parent.type == syms.with_stmt + and has_sibling_with_type(node, token.COMMA) + ) + or (is_yield(node) and parent.type != syms.expr_stmt) + or ( + # This condition tries to prevent removing non-optional brackets + # around a tuple, however, can be a bit overzealous so we provide + # and option to skip this check for `for` and `with` statements. + not remove_brackets_around_comma + and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY + # Skip this check in Preview mode in order to + # Remove parentheses around multiple exception types in except and + # except* without as. See PEP 758 for details. + and not ( + Preview.remove_parens_around_except_types in mode + and Feature.UNPARENTHESIZED_EXCEPT_TYPES in features + # is a tuple + and is_tuple(node) + # has a parent node + and node.parent is not None + # parent is an except clause + and node.parent.type == syms.except_clause + # is not immediately followed by as clause + and not ( + node.next_sibling is not None + and is_name_token(node.next_sibling) + and node.next_sibling.value == "as" + ) + ) + ) + or is_tuple_containing_walrus(node) + or (not allow_star_expr and is_tuple_containing_star(node)) + or is_generator(node) + ): + return False + + if is_walrus_assignment(node): + if parent.type in [ + syms.annassign, + syms.expr_stmt, + syms.assert_stmt, + syms.return_stmt, + syms.except_clause, + syms.funcdef, + syms.with_stmt, + syms.testlist_gexp, + syms.tname, + # these ones aren't useful to end users, but they do please fuzzers + syms.for_stmt, + syms.del_stmt, + syms.for_stmt, + ]: + return False + + first = node.children[0] + last = node.children[-1] + if is_lpar_token(first) and is_rpar_token(last): + middle = node.children[1] + # make parentheses invisible + if ( + # If the prefix of `middle` includes a type comment with + # ignore annotation, then we do not remove the parentheses + not is_type_ignore_comment_string(middle.prefix.strip(), mode=mode) + ): + first.value = "" + last.value = "" + maybe_make_parens_invisible_in_atom( + middle, + parent=parent, + mode=mode, + features=features, + remove_brackets_around_comma=remove_brackets_around_comma, + ) + + if is_atom_with_invisible_parens(middle): + # Strip the invisible parens from `middle` by replacing + # it with the child in-between the invisible parens + middle.replace(middle.children[1]) + + if middle.children[0].prefix.strip(): + # Preserve comments before first paren + middle.children[1].prefix = ( + middle.children[0].prefix + middle.children[1].prefix + ) + + if middle.children[-1].prefix.strip(): + # Preserve comments before last paren + last.prefix = middle.children[-1].prefix + last.prefix + + return False + + return True + + +def should_split_line(line: Line, opening_bracket: Leaf) -> bool: + """Should `line` be immediately split with `delimiter_split()` after RHS?""" + + if not (opening_bracket.parent and opening_bracket.value in "[{("): + return False + + # We're essentially checking if the body is delimited by commas and there's more + # than one of them (we're excluding the trailing comma and if the delimiter priority + # is still commas, that means there's more). 
+ exclude = set()
+ trailing_comma = False
+ try:
+ last_leaf = line.leaves[-1]
+ if last_leaf.type == token.COMMA:
+ trailing_comma = True
+ exclude.add(id(last_leaf))
+ max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
+ except (IndexError, ValueError):
+ return False
+
+ return max_priority == COMMA_PRIORITY and (
+ (line.mode.magic_trailing_comma and trailing_comma)
+ # always explode imports
+ or opening_bracket.parent.type in {syms.atom, syms.import_from}
+ )
+
+
+ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
+ """Generate sets of closing bracket IDs that should be omitted in a RHS.
+
+ Brackets can be omitted if the entire trailer up to and including
+ a preceding closing bracket fits in one line.
+
+ Yielded sets are cumulative (contain results of previous yields, too). First
+ set is empty, unless the line should explode, in which case bracket pairs until
+ the one that needs to explode are omitted.
+ """
+
+ omit: set[LeafID] = set()
+ if not line.magic_trailing_comma:
+ yield omit
+
+ length = 4 * line.depth
+ opening_bracket: Leaf | None = None
+ closing_bracket: Leaf | None = None
+ inner_brackets: set[LeafID] = set()
+ for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
+ length += leaf_length
+ if length > line_length:
+ break
+
+ has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
+ if leaf.type == STANDALONE_COMMENT or has_inline_comment:
+ break
+
+ if opening_bracket:
+ if leaf is opening_bracket:
+ opening_bracket = None
+ elif leaf.type in CLOSING_BRACKETS:
+ prev = line.leaves[index - 1] if index > 0 else None
+ if (
+ prev
+ and prev.type == token.COMMA
+ and leaf.opening_bracket is not None
+ and not is_one_sequence_between(
+ leaf.opening_bracket, leaf, line.leaves
+ )
+ ):
+ # Never omit bracket pairs with trailing commas.
+ # We need to explode on those.
+ break
+
+ inner_brackets.add(id(leaf))
+ elif leaf.type in CLOSING_BRACKETS:
+ prev = line.leaves[index - 1] if index > 0 else None
+ if prev and prev.type in OPENING_BRACKETS:
+ # Empty brackets would fail a split so treat them as "inner"
+ # brackets (e.g. only add them to the `omit` set if another
+ # pair of brackets was good enough).
+ inner_brackets.add(id(leaf))
+ continue
+
+ if closing_bracket:
+ omit.add(id(closing_bracket))
+ omit.update(inner_brackets)
+ inner_brackets.clear()
+ yield omit
+
+ if (
+ prev
+ and prev.type == token.COMMA
+ and leaf.opening_bracket is not None
+ and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves)
+ ):
+ # Never omit bracket pairs with trailing commas.
+ # We need to explode on those.
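+ # For example, `call(a, b,).method()` must split inside `call(...)`
+ # because of the trailing comma, so its bracket pair is never offered
+ # for omission.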
+ break + + if leaf.value: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> list[Line]: + if not line_str: + line_str = line_to_string(line) + result: list[Line] = [] + for transformed_line in transform(line, features, mode): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + features_set = set(features) + if ( + Feature.FORCE_OPTIONAL_PARENTHESES in features_set + or transform.__class__.__name__ != "rhs" + or not line.bracket_tracker.invisible + or any(bracket.value for bracket in line.bracket_tracker.invisible) + or line.contains_multiline_strings() + or result[0].contains_uncollapsable_type_comments() + or result[0].contains_unsplittable_type_ignore() + or is_line_short_enough(result[0], mode=mode) + # If any leaves have no parents (which _can_ occur since + # `transform(line)` potentially destroys the line's underlying node + # structure), then we can't proceed. Doing so would cause the below + # call to `append_leaves()` to fail. + or any(leaf.parent is None for leaf in line.leaves) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion): + result = second_opinion + return result diff --git a/py311/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cc4c66ed73c4687178af12ff648e7085608f6521 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/lines.py b/py311/lib/python3.11/site-packages/black/lines.py new file mode 100644 index 0000000000000000000000000000000000000000..09fce3a193d62b67be5d2480166d46270cd9dbe7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/lines.py @@ -0,0 +1,1101 @@ +import itertools +import math +from collections.abc import Callable, Iterator, Sequence +from dataclasses import dataclass, field +from typing import Optional, TypeVar, Union, cast + +from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker +from black.mode import Mode, Preview +from black.nodes import ( + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + TEST_DESCENDANTS, + child_towards, + is_docstring, + is_import, + is_multiline_string, + is_one_sequence_between, + is_type_comment, + is_type_ignore_comment, + is_with_or_async_with_stmt, + make_simple_prefix, + replace_child, + syms, + whitespace, +) +from black.strings import str_width +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +T = TypeVar("T") +Index = int +LeafID = int +LN = Union[Leaf, Node] + + +@dataclass +class Line: + """Holds leaves and comments. 
Can be printed with `str(line)`.""" + + mode: Mode = field(repr=False) + depth: int = 0 + leaves: list[Leaf] = field(default_factory=list) + # keys ordered like `leaves` + comments: dict[LeafID, list[Leaf]] = field(default_factory=dict) + bracket_tracker: BracketTracker = field(default_factory=BracketTracker) + inside_brackets: bool = False + should_split_rhs: bool = False + magic_trailing_comma: Leaf | None = None + + def append( + self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False + ) -> None: + """Add a new `leaf` to the end of the line. + + Unless `preformatted` is True, the `leaf` will receive a new consistent + whitespace prefix and metadata applied by :class:`BracketTracker`. + Trailing commas are maybe removed, unpacked for loop variables are + demoted from being delimiters. + + Inline comments are put aside. + """ + has_value = ( + leaf.type in BRACKETS + # empty fstring and tstring middles must not be truncated + or leaf.type in (token.FSTRING_MIDDLE, token.TSTRING_MIDDLE) + or bool(leaf.value.strip()) + ) + if not has_value: + return + + if leaf.type == token.COLON and self.is_class_paren_empty: + del self.leaves[-2:] + if self.leaves and not preformatted: + # Note: at this point leaf.prefix should be empty except for + # imports, for which we only preserve newlines. + leaf.prefix += whitespace( + leaf, + complex_subscript=self.is_complex_subscript(leaf), + mode=self.mode, + ) + if self.inside_brackets or not preformatted or track_bracket: + self.bracket_tracker.mark(leaf) + if self.mode.magic_trailing_comma: + if self.has_magic_trailing_comma(leaf): + self.magic_trailing_comma = leaf + elif self.has_magic_trailing_comma(leaf): + self.remove_trailing_comma() + if not self.append_comment(leaf): + self.leaves.append(leaf) + + def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: + """Like :func:`append()` but disallow invalid standalone comment structure. + + Raises ValueError when any `leaf` is appended after a standalone comment + or when a standalone comment is not the first leaf on the line. + """ + if ( + self.bracket_tracker.depth == 0 + or self.bracket_tracker.any_open_for_or_lambda() + ): + if self.is_comment: + raise ValueError("cannot append to standalone comments") + + if self.leaves and leaf.type == STANDALONE_COMMENT: + raise ValueError( + "cannot append standalone comments to a populated line" + ) + + self.append(leaf, preformatted=preformatted) + + @property + def is_comment(self) -> bool: + """Is this line a standalone comment?""" + return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT + + @property + def is_decorator(self) -> bool: + """Is this line a decorator?""" + return bool(self) and self.leaves[0].type == token.AT + + @property + def is_import(self) -> bool: + """Is this an import line?""" + return bool(self) and is_import(self.leaves[0]) + + @property + def is_with_or_async_with_stmt(self) -> bool: + """Is this a with_stmt line?""" + return bool(self) and is_with_or_async_with_stmt(self.leaves[0]) + + @property + def is_class(self) -> bool: + """Is this line a class definition?""" + return ( + bool(self) + and self.leaves[0].type == token.NAME + and self.leaves[0].value == "class" + ) + + @property + def is_stub_class(self) -> bool: + """Is this line a class definition with a body consisting only of "..."?""" + return self.is_class and self.leaves[-3:] == [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_def(self) -> bool: + """Is this a function definition? 
(Also returns True for async defs.)""" + try: + first_leaf = self.leaves[0] + except IndexError: + return False + + try: + second_leaf: Leaf | None = self.leaves[1] + except IndexError: + second_leaf = None + return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( + first_leaf.type == token.ASYNC + and second_leaf is not None + and second_leaf.type == token.NAME + and second_leaf.value == "def" + ) + + @property + def is_stub_def(self) -> bool: + """Is this line a function definition with a body consisting only of "..."?""" + return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, ":")] + [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_class_paren_empty(self) -> bool: + """Is this a class with no base classes but using parentheses? + + Those are unnecessary and should be removed. + """ + return ( + bool(self) + and len(self.leaves) == 4 + and self.is_class + and self.leaves[2].type == token.LPAR + and self.leaves[2].value == "(" + and self.leaves[3].type == token.RPAR + and self.leaves[3].value == ")" + ) + + @property + def _is_triple_quoted_string(self) -> bool: + """Is the line a triple quoted string?""" + if not self or self.leaves[0].type != token.STRING: + return False + value = self.leaves[0].value + if value.startswith(('"""', "'''")): + return True + if value.startswith(("r'''", 'r"""', "R'''", 'R"""')): + return True + return False + + @property + def is_docstring(self) -> bool: + """Is the line a docstring?""" + return bool(self) and is_docstring(self.leaves[0]) + + @property + def is_chained_assignment(self) -> bool: + """Is the line a chained assignment""" + return [leaf.type for leaf in self.leaves].count(token.EQUAL) > 1 + + @property + def opens_block(self) -> bool: + """Does this line open a new level of indentation.""" + if len(self.leaves) == 0: + return False + return self.leaves[-1].type == token.COLON + + def is_fmt_pass_converted( + self, *, first_leaf_matches: Callable[[Leaf], bool] | None = None + ) -> bool: + """Is this line converted from fmt off/skip code? + + If first_leaf_matches is not None, it only returns True if the first + leaf of converted code matches. 
+ """ + if len(self.leaves) != 1: + return False + leaf = self.leaves[0] + if ( + leaf.type != STANDALONE_COMMENT + or leaf.fmt_pass_converted_first_leaf is None + ): + return False + return first_leaf_matches is None or first_leaf_matches( + leaf.fmt_pass_converted_first_leaf + ) + + def contains_standalone_comments(self) -> bool: + """If so, needs to be split before emitting.""" + for leaf in self.leaves: + if leaf.type == STANDALONE_COMMENT: + return True + + return False + + def contains_implicit_multiline_string_with_comments(self) -> bool: + """Chck if we have an implicit multiline string with comments on the line""" + for leaf_type, leaf_group_iterator in itertools.groupby( + self.leaves, lambda leaf: leaf.type + ): + if leaf_type != token.STRING: + continue + leaf_list = list(leaf_group_iterator) + if len(leaf_list) == 1: + continue + for leaf in leaf_list: + if self.comments_after(leaf): + return True + return False + + def contains_uncollapsable_type_comments(self) -> bool: + ignored_ids = set() + try: + last_leaf = self.leaves[-1] + ignored_ids.add(id(last_leaf)) + if last_leaf.type == token.COMMA or ( + last_leaf.type == token.RPAR and not last_leaf.value + ): + # When trailing commas or optional parens are inserted by Black for + # consistency, comments after the previous last element are not moved + # (they don't have to, rendering will still be correct). So we ignore + # trailing commas and invisible. + last_leaf = self.leaves[-2] + ignored_ids.add(id(last_leaf)) + except IndexError: + return False + + # A type comment is uncollapsable if it is attached to a leaf + # that isn't at the end of the line (since that could cause it + # to get associated to a different argument) or if there are + # comments before it (since that could cause it to get hidden + # behind a comment. + comment_seen = False + for leaf_id, comments in self.comments.items(): + for comment in comments: + if is_type_comment(comment, mode=self.mode): + if comment_seen or ( + not is_type_ignore_comment(comment, mode=self.mode) + and leaf_id not in ignored_ids + ): + return True + + comment_seen = True + + return False + + def contains_unsplittable_type_ignore(self) -> bool: + if not self.leaves: + return False + + # If a 'type: ignore' is attached to the end of a line, we + # can't split the line, because we can't know which of the + # subexpressions the ignore was meant to apply to. + # + # We only want this to apply to actual physical lines from the + # original source, though: we don't want the presence of a + # 'type: ignore' at the end of a multiline expression to + # justify pushing it all onto one line. Thus we + # (unfortunately) need to check the actual source lines and + # only report an unsplittable 'type: ignore' if this line was + # one line in the original code. + + # Grab the first and last line numbers, skipping generated leaves + first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) + last_line = next( + (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 + ) + + if first_line == last_line: + # We look at the last two leaves since a comma or an + # invisible paren could have been added at the end of the + # line. 
+ for node in self.leaves[-2:]: + for comment in self.comments.get(id(node), []): + if is_type_ignore_comment(comment, mode=self.mode): + return True + + return False + + def contains_multiline_strings(self) -> bool: + return any(is_multiline_string(leaf) for leaf in self.leaves) + + def has_magic_trailing_comma(self, closing: Leaf) -> bool: + """Return True if we have a magic trailing comma, that is when: + - there's a trailing comma here + - it's not from single-element square bracket indexing + - it's not a one-tuple + """ + if not ( + closing.type in CLOSING_BRACKETS + and self.leaves + and self.leaves[-1].type == token.COMMA + ): + return False + + if closing.type == token.RBRACE: + return True + + if closing.type == token.RSQB: + if ( + closing.parent is not None + and closing.parent.type == syms.trailer + and closing.opening_bracket is not None + and is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ): + assert closing.prev_sibling is not None + assert closing.prev_sibling.type == syms.subscriptlist + return False + + return True + + if self.is_import: + return True + + if closing.opening_bracket is not None and not is_one_sequence_between( + closing.opening_bracket, closing, self.leaves + ): + return True + + return False + + def append_comment(self, comment: Leaf) -> bool: + """Add an inline or standalone comment to the line.""" + if ( + comment.type == STANDALONE_COMMENT + and self.bracket_tracker.any_open_brackets() + ): + comment.prefix = "" + return False + + if comment.type != token.COMMENT: + return False + + if not self.leaves: + comment.type = STANDALONE_COMMENT + comment.prefix = "" + return False + + last_leaf = self.leaves[-1] + if ( + last_leaf.type == token.RPAR + and not last_leaf.value + and last_leaf.parent + and len(list(last_leaf.parent.leaves())) <= 3 + and not is_type_comment(comment, mode=self.mode) + ): + # Comments on an optional parens wrapping a single leaf should belong to + # the wrapped node except if it's a type comment. Pinning the comment like + # this avoids unstable formatting caused by comment migration. 
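+ # For example, in `return (x)  # note` the parens become invisible and
+ # the trailing `)` is empty; pinning `# note` to the leaf before it
+ # keeps the comment from migrating between formatting passes.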
+ if len(self.leaves) < 2:
+ comment.type = STANDALONE_COMMENT
+ comment.prefix = ""
+ return False
+
+ last_leaf = self.leaves[-2]
+ self.comments.setdefault(id(last_leaf), []).append(comment)
+ return True
+
+ def comments_after(self, leaf: Leaf) -> list[Leaf]:
+ """Return the comments that should appear directly after `leaf`."""
+ return self.comments.get(id(leaf), [])
+
+ def remove_trailing_comma(self) -> None:
+ """Remove the trailing comma and move the comments attached to it."""
+ trailing_comma = self.leaves.pop()
+ trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
+ self.comments.setdefault(id(self.leaves[-1]), []).extend(
+ trailing_comma_comments
+ )
+
+ def is_complex_subscript(self, leaf: Leaf) -> bool:
+ """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+ open_lsqb = self.bracket_tracker.get_open_lsqb()
+ if open_lsqb is None:
+ return False
+
+ subscript_start = open_lsqb.next_sibling
+
+ if isinstance(subscript_start, Node):
+ if subscript_start.type == syms.listmaker:
+ return False
+
+ if subscript_start.type == syms.subscriptlist:
+ subscript_start = child_towards(subscript_start, leaf)
+
+ return subscript_start is not None and any(
+ n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
+ )
+
+ def enumerate_with_length(
+ self, is_reversed: bool = False
+ ) -> Iterator[tuple[Index, Leaf, int]]:
+ """Return an enumeration of leaves with their length.
+
+ Stops prematurely on multiline strings and standalone comments.
+ """
+ op = cast(
+ Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
+ enumerate_reversed if is_reversed else enumerate,
+ )
+ for index, leaf in op(self.leaves):
+ length = len(leaf.prefix) + len(leaf.value)
+ if "\n" in leaf.value:
+ return  # Multiline strings, we can't continue.
+
+ for comment in self.comments_after(leaf):
+ length += len(comment.value)
+
+ yield index, leaf, length
+
+ def clone(self) -> "Line":
+ return Line(
+ mode=self.mode,
+ depth=self.depth,
+ inside_brackets=self.inside_brackets,
+ should_split_rhs=self.should_split_rhs,
+ magic_trailing_comma=self.magic_trailing_comma,
+ )
+
+ def __str__(self) -> str:
+ """Render the line."""
+ if not self:
+ return "\n"
+
+ indent = " " * self.depth
+ leaves = iter(self.leaves)
+ first = next(leaves)
+ res = f"{first.prefix}{indent}{first.value}"
+ res += "".join(str(leaf) for leaf in leaves)
+ comments_iter = itertools.chain.from_iterable(self.comments.values())
+ comments = [str(comment) for comment in comments_iter]
+ res += "".join(comments)
+
+ return res + "\n"
+
+ def __bool__(self) -> bool:
+ """Return True if the line has leaves or comments."""
+ return bool(self.leaves or self.comments)
+
+
+ @dataclass
+ class RHSResult:
+ """Intermediate split result from a right hand split."""
+
+ head: Line
+ body: Line
+ tail: Line
+ opening_bracket: Leaf
+ closing_bracket: Leaf
+
+
+ @dataclass
+ class LinesBlock:
+ """Class that holds information about a block of formatted lines.
+
+ This is introduced so that the EmptyLineTracker can look behind the standalone
+ comments and adjust their empty lines for class or def lines.
+ """ + + mode: Mode + previous_block: Optional["LinesBlock"] + original_line: Line + before: int = 0 + content_lines: list[str] = field(default_factory=list) + after: int = 0 + form_feed: bool = False + + def all_lines(self) -> list[str]: + empty_line = str(Line(mode=self.mode)) + prefix = make_simple_prefix(self.before, self.form_feed, empty_line) + return [prefix] + self.content_lines + [empty_line * self.after] + + +@dataclass +class EmptyLineTracker: + """Provides a stateful method that returns the number of potential extra + empty lines needed before and after the currently processed line. + + Note: this tracker works on lines that haven't been split yet. It assumes + the prefix of the first leaf consists of optional newlines. Those newlines + are consumed by `maybe_empty_lines()` and included in the computation. + """ + + mode: Mode + previous_line: Line | None = None + previous_block: LinesBlock | None = None + previous_defs: list[Line] = field(default_factory=list) + semantic_leading_comment: LinesBlock | None = None + + def maybe_empty_lines(self, current_line: Line) -> LinesBlock: + """Return the number of extra empty lines before and after the `current_line`. + + This is for separating `def`, `async def` and `class` with extra empty + lines (two on module-level). + """ + form_feed = ( + current_line.depth == 0 + and bool(current_line.leaves) + and "\f\n" in current_line.leaves[0].prefix + ) + before, after = self._maybe_empty_lines(current_line) + previous_after = self.previous_block.after if self.previous_block else 0 + before = max(0, before - previous_after) + if Preview.fix_module_docstring_detection in self.mode: + # Always have one empty line after a module docstring + if self._line_is_module_docstring(current_line): + before = 1 + else: + if ( + # Always have one empty line after a module docstring + self.previous_block + and self.previous_block.previous_block is None + and len(self.previous_block.original_line.leaves) == 1 + and self.previous_block.original_line.is_docstring + and not (current_line.is_class or current_line.is_def) + ): + before = 1 + + block = LinesBlock( + mode=self.mode, + previous_block=self.previous_block, + original_line=current_line, + before=before, + after=after, + form_feed=form_feed, + ) + + # Maintain the semantic_leading_comment state. 
+ if current_line.is_comment: + if self.previous_line is None or ( + not self.previous_line.is_decorator + # `or before` means this comment already has an empty line before + and (not self.previous_line.is_comment or before) + and (self.semantic_leading_comment is None or before) + ): + self.semantic_leading_comment = block + # `or before` means this decorator already has an empty line before + elif not current_line.is_decorator or before: + self.semantic_leading_comment = None + + self.previous_line = current_line + self.previous_block = block + return block + + def _line_is_module_docstring(self, current_line: Line) -> bool: + previous_block = self.previous_block + if not previous_block: + return False + if ( + len(previous_block.original_line.leaves) != 1 + or not previous_block.original_line.is_docstring + or current_line.is_class + or current_line.is_def + ): + return False + while previous_block := previous_block.previous_block: + if not previous_block.original_line.is_comment: + return False + return True + + def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]: + max_allowed = 1 + if current_line.depth == 0: + max_allowed = 1 if self.mode.is_pyi else 2 + + if current_line.leaves: + # Consume the first leaf's extra newlines. + first_leaf = current_line.leaves[0] + before = first_leaf.prefix.count("\n") + before = min(before, max_allowed) + first_leaf.prefix = "" + else: + before = 0 + + user_had_newline = bool(before) + depth = current_line.depth + + # Mutate self.previous_defs, remainder of this function should be pure + previous_def = None + while self.previous_defs and self.previous_defs[-1].depth >= depth: + previous_def = self.previous_defs.pop() + if current_line.is_def or current_line.is_class: + self.previous_defs.append(current_line) + + if self.previous_line is None: + # Don't insert empty lines before the first line in the file. + return 0, 0 + + if current_line.is_docstring: + if self.previous_line.is_class: + return 0, 1 + if self.previous_line.opens_block and self.previous_line.is_def: + return 0, 0 + + if previous_def is not None: + assert self.previous_line is not None + if self.mode.is_pyi: + if previous_def.is_class and not previous_def.is_stub_class: + before = 1 + elif depth and not current_line.is_def and self.previous_line.is_def: + # Empty lines between attributes and methods should be preserved. + before = 1 if user_had_newline else 0 + elif depth: + before = 0 + else: + before = 1 + else: + if depth: + before = 1 + elif ( + not depth + and previous_def.depth + and current_line.leaves[-1].type == token.COLON + and ( + current_line.leaves[0].value + not in ("with", "try", "for", "while", "if", "match") + ) + ): + # We shouldn't add two newlines between an indented function and + # a dependent non-indented clause. This is to avoid issues with + # conditional function definitions that are technically top-level + # and therefore get two trailing newlines, but look weird and + # inconsistent when they're followed by elif, else, etc. This is + # worse because these functions only get *one* preceding newline + # already. 
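+ # For example:
+ #     if TYPE_CHECKING:
+ #         def f(): ...
+ #     else:
+ #         def f(): ...
+ # The `else:` gets a single blank line above it, not two.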
+ before = 1 + else: + before = 2 + + if current_line.is_decorator or current_line.is_def or current_line.is_class: + return self._maybe_empty_lines_for_class_or_def( + current_line, before, user_had_newline + ) + + if ( + self.previous_line.is_import + and self.previous_line.depth == 0 + and current_line.depth == 0 + and not current_line.is_import + and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import) + and Preview.always_one_newline_after_import in self.mode + ): + return 1, 0 + + if ( + self.previous_line.is_import + and not current_line.is_import + and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import) + and depth == self.previous_line.depth + ): + return (before or 1), 0 + + return before, 0 + + def _maybe_empty_lines_for_class_or_def( + self, current_line: Line, before: int, user_had_newline: bool + ) -> tuple[int, int]: + assert self.previous_line is not None + + if self.previous_line.is_decorator: + if self.mode.is_pyi and current_line.is_stub_class: + # Insert an empty line after a decorated stub class + return 0, 1 + return 0, 0 + + if self.previous_line.depth < current_line.depth and ( + self.previous_line.is_class or self.previous_line.is_def + ): + if self.mode.is_pyi: + return 0, 0 + return 1 if user_had_newline else 0, 0 + + comment_to_add_newlines: LinesBlock | None = None + if ( + self.previous_line.is_comment + and self.previous_line.depth == current_line.depth + and before == 0 + ): + slc = self.semantic_leading_comment + if ( + slc is not None + and slc.previous_block is not None + and not slc.previous_block.original_line.is_class + and not slc.previous_block.original_line.opens_block + and slc.before <= 1 + ): + comment_to_add_newlines = slc + else: + return 0, 0 + + if self.mode.is_pyi: + if current_line.is_class or self.previous_line.is_class: + if self.previous_line.depth < current_line.depth: + newlines = 0 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + elif current_line.is_stub_class and self.previous_line.is_stub_class: + # No blank line between classes with an empty body + newlines = 0 + else: + newlines = 1 + # Don't inspect the previous line if it's part of the body of the previous + # statement in the same level, we always want a blank line if there's + # something with a body preceding. + elif self.previous_line.depth > current_line.depth: + newlines = 1 + elif ( + current_line.is_def or current_line.is_decorator + ) and not self.previous_line.is_def: + if current_line.depth: + # In classes empty lines between attributes and methods should + # be preserved. + newlines = min(1, before) + else: + # Blank line between a block of functions (maybe with preceding + # decorators) and a block of non-functions + newlines = 1 + else: + newlines = 0 + else: + newlines = 1 if current_line.depth else 2 + # If a user has left no space after a dummy implementation, don't insert + # new lines. This is useful for instance for @overload or Protocols. 
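+ # For example:
+ #     @overload
+ #     def f(x: int) -> int: ...
+ #     @overload
+ #     def f(x: str) -> str: ...
+ # stays packed when the user wrote no blank lines between overloads.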
+ if self.previous_line.is_stub_def and not user_had_newline: + newlines = 0 + if comment_to_add_newlines is not None: + previous_block = comment_to_add_newlines.previous_block + if previous_block is not None: + comment_to_add_newlines.before = ( + max(comment_to_add_newlines.before, newlines) - previous_block.after + ) + newlines = 0 + return newlines, 0 + + +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]: + """Like `reversed(enumerate(sequence))` if that were possible.""" + index = len(sequence) - 1 + for element in reversed(sequence): + yield (index, element) + index -= 1 + + +def append_leaves( + new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False +) -> None: + """ + Append leaves (taken from @old_line) to @new_line, making sure to fix the + underlying Node structure where appropriate. + + All of the leaves in @leaves are duplicated. The duplicates are then + appended to @new_line and used to replace their originals in the underlying + Node structure. Any comments attached to the old leaves are reattached to + the new leaves. + + Pre-conditions: + set(@leaves) is a subset of set(@old_line.leaves). + """ + for old_leaf in leaves: + new_leaf = Leaf(old_leaf.type, old_leaf.value) + replace_child(old_leaf, new_leaf) + new_line.append(new_leaf, preformatted=preformatted) + + for comment_leaf in old_line.comments_after(old_leaf): + new_line.append(comment_leaf, preformatted=True) + + +def is_line_short_enough(line: Line, *, mode: Mode, line_str: str = "") -> bool: + """For non-multiline strings, return True if `line` is no longer than `line_length`. + For multiline strings, looks at the context around `line` to determine + if it should be inlined or split up. + Uses the provided `line_str` rendering, if any, otherwise computes a new one. + """ + if not line_str: + line_str = line_to_string(line) + + if Preview.multiline_string_handling not in mode: + return ( + str_width(line_str) <= mode.line_length + and "\n" not in line_str # multiline strings + and not line.contains_standalone_comments() + ) + + if line.contains_standalone_comments(): + return False + if "\n" not in line_str: + # No multiline strings (MLS) present + return str_width(line_str) <= mode.line_length + + first, *_, last = line_str.split("\n") + if str_width(first) > mode.line_length or str_width(last) > mode.line_length: + return False + + # Traverse the AST to examine the context of the multiline string (MLS), + # tracking aspects such as depth and comma existence, + # to determine whether to split the MLS or keep it together. + # Depth (which is based on the existing bracket_depth concept) + # is needed to determine nesting level of the MLS. + # Includes special case for trailing commas. 
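+ # Roughly: `func("""...multiline...""", other)` records a comma at the
+ # MLS's bracket depth and is forced to split, while `x = """..."""`
+ # records no commas and may stay inline.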
+ commas: list[int] = [] # tracks number of commas per depth level + multiline_string: Leaf | None = None + # store the leaves that contain parts of the MLS + multiline_string_contexts: list[LN] = [] + + max_level_to_update: int | float = math.inf # track the depth of the MLS + for i, leaf in enumerate(line.leaves): + if max_level_to_update == math.inf: + had_comma: int | None = None + if leaf.bracket_depth + 1 > len(commas): + commas.append(0) + elif leaf.bracket_depth + 1 < len(commas): + had_comma = commas.pop() + if ( + had_comma is not None + and multiline_string is not None + and multiline_string.bracket_depth == leaf.bracket_depth + 1 + ): + # Have left the level with the MLS, stop tracking commas + max_level_to_update = leaf.bracket_depth + if had_comma > 0: + # MLS was in parens with at least one comma - force split + return False + + if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA: + # Inside brackets, ignore trailing comma + # directly after MLS/MLS-containing expression + ignore_ctxs: list[LN | None] = [None] + ignore_ctxs += multiline_string_contexts + if (line.inside_brackets or leaf.bracket_depth > 0) and ( + i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs + ): + commas[leaf.bracket_depth] += 1 + if max_level_to_update != math.inf: + max_level_to_update = min(max_level_to_update, leaf.bracket_depth) + + if is_multiline_string(leaf): + if leaf.parent and ( + leaf.parent.type == syms.test + or (leaf.parent.parent and leaf.parent.parent.type == syms.dictsetmaker) + ): + # Keep ternary and dictionary values parenthesized + return False + if len(multiline_string_contexts) > 0: + # >1 multiline string cannot fit on a single line - force split + return False + multiline_string = leaf + ctx: LN = leaf + # fetch the leaf components of the MLS in the AST + while str(ctx) in line_str: + multiline_string_contexts.append(ctx) + if ctx.parent is None: + break + ctx = ctx.parent + + # May not have a triple-quoted multiline string at all, + # in case of a regular string with embedded newlines and line continuations + if len(multiline_string_contexts) == 0: + return True + + return all(val == 0 for val in commas) + + +def can_be_split(line: Line) -> bool: + """Return False if the line cannot be split *for sure*. + + This is not an exhaustive search but a cheap heuristic that we can use to + avoid some unfortunate formattings (mostly around wrapping unsplittable code + in unnecessary parentheses). + """ + leaves = line.leaves + if len(leaves) < 2: + return False + + if leaves[0].type == token.STRING and leaves[1].type == token.DOT: + call_count = 0 + dot_count = 0 + next = leaves[-1] + for leaf in leaves[-2::-1]: + if leaf.type in OPENING_BRACKETS: + if next.type not in CLOSING_BRACKETS: + return False + + call_count += 1 + elif leaf.type == token.DOT: + dot_count += 1 + elif leaf.type == token.NAME: + if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + return False + + elif leaf.type not in CLOSING_BRACKETS: + return False + + if dot_count > 1 and call_count > 1: + return False + + return True + + +def can_omit_invisible_parens( + rhs: RHSResult, + line_length: int, +) -> bool: + """Does `rhs.body` have a shape safe to reformat without optional parens around it? + + Returns True for only a subset of potentially nice looking formattings but + the point is to not return false positives that end up producing lines that + are too long. 
+ """ + line = rhs.body + + # We need optional parens in order to split standalone comments to their own lines + # if there are no nested parens around the standalone comments + closing_bracket: Leaf | None = None + for leaf in reversed(line.leaves): + if closing_bracket and leaf is closing_bracket.opening_bracket: + closing_bracket = None + if leaf.type == STANDALONE_COMMENT and not closing_bracket: + return False + if ( + not closing_bracket + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket in line.leaves + and leaf.value + ): + closing_bracket = leaf + + bt = line.bracket_tracker + if not bt.delimiters: + # Without delimiters the optional parentheses are useless. + return True + + max_priority = bt.max_delimiter_priority() + delimiter_count = bt.delimiter_count_with_priority(max_priority) + if delimiter_count > 1: + # With more than one delimiter of a kind the optional parentheses read better. + return False + + if delimiter_count == 1: + if max_priority == COMMA_PRIORITY and rhs.head.is_with_or_async_with_stmt: + # For two context manager with statements, the optional parentheses read + # better. In this case, `rhs.body` is the context managers part of + # the with statement. `rhs.head` is the `with (` part on the previous + # line. + return False + # Otherwise it may also read better, but we don't do it today and requires + # careful considerations for all possible cases. See + # https://github.com/psf/black/issues/2156. + + if max_priority == DOT_PRIORITY: + # A single stranded method call doesn't require optional parentheses. + return True + + assert len(line.leaves) >= 2, "Stranded delimiter" + + # With a single delimiter, omit if the expression starts or ends with + # a bracket. + first = line.leaves[0] + second = line.leaves[1] + if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True + + # Note: we are not returning False here because a line might have *both* + # a leading opening bracket and a trailing closing bracket. If the + # opening bracket doesn't match our rule, maybe the closing will. + + penultimate = line.leaves[-2] + last = line.leaves[-1] + + if ( + last.type == token.RPAR + or last.type == token.RBRACE + or ( + # don't use indexing for omitting optional parentheses; + # it looks weird + last.type == token.RSQB + and last.parent + and last.parent.type != syms.trailer + ) + ): + if penultimate.type in OPENING_BRACKETS: + # Empty brackets don't help. + return False + + if is_multiline_string(first): + # Additional wrapping of a multiline string in this situation is + # unnecessary. + return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in line.enumerate_with_length(): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: + length += leaf_length + if length > line_length: + break + + if leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. 
+ remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in line.enumerate_with_length(): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + seen_other_brackets = True + + return False + + +def line_to_string(line: Line) -> str: + """Returns the string representation of @line. + + WARNING: This is known to be computationally expensive. + """ + return str(line).strip("\n") diff --git a/py311/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8e5b51fb8bd906ce7df9fb504340725ac3cfc0e8 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/mode.py b/py311/lib/python3.11/site-packages/black/mode.py new file mode 100644 index 0000000000000000000000000000000000000000..702f580e97963f2352c875fa41f9f00df02ec7d7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/mode.py @@ -0,0 +1,335 @@ +"""Data structures configuring Black behavior. + +Mostly around Python language feature support per version and Black configuration +chosen by the user. +""" + +from dataclasses import dataclass, field +from enum import Enum, auto +from hashlib import sha256 +from operator import attrgetter +from typing import Final + +from black.const import DEFAULT_LINE_LENGTH + + +class TargetVersion(Enum): + PY33 = 3 + PY34 = 4 + PY35 = 5 + PY36 = 6 + PY37 = 7 + PY38 = 8 + PY39 = 9 + PY310 = 10 + PY311 = 11 + PY312 = 12 + PY313 = 13 + PY314 = 14 + + def pretty(self) -> str: + assert self.name[:2] == "PY" + return f"Python {self.name[2]}.{self.name[3:]}" + + +class Feature(Enum): + F_STRINGS = 2 + NUMERIC_UNDERSCORES = 3 + TRAILING_COMMA_IN_CALL = 4 + TRAILING_COMMA_IN_DEF = 5 + # The following two feature-flags are mutually exclusive, and exactly one should be + # set for every version of python. 
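+ # For example, 3.6 and older parse `async` as an ordinary NAME
+ # (ASYNC_IDENTIFIERS), while 3.7+ reserve `async`/`await` as keywords
+ # (ASYNC_KEYWORDS); see VERSION_TO_FEATURES below.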
+ ASYNC_IDENTIFIERS = 6 + ASYNC_KEYWORDS = 7 + ASSIGNMENT_EXPRESSIONS = 8 + POS_ONLY_ARGUMENTS = 9 + RELAXED_DECORATORS = 10 + PATTERN_MATCHING = 11 + UNPACKING_ON_FLOW = 12 + ANN_ASSIGN_EXTENDED_RHS = 13 + EXCEPT_STAR = 14 + VARIADIC_GENERICS = 15 + DEBUG_F_STRINGS = 16 + PARENTHESIZED_CONTEXT_MANAGERS = 17 + TYPE_PARAMS = 18 + # FSTRING_PARSING = 19 # unused + TYPE_PARAM_DEFAULTS = 20 + UNPARENTHESIZED_EXCEPT_TYPES = 21 + T_STRINGS = 22 + FORCE_OPTIONAL_PARENTHESES = 50 + + # __future__ flags + FUTURE_ANNOTATIONS = 51 + + +FUTURE_FLAG_TO_FEATURE: Final = { + "annotations": Feature.FUTURE_ANNOTATIONS, +} + + +VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = { + TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY36: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY37: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + }, + TargetVersion.PY38: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY39: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + }, + TargetVersion.PY310: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + }, + TargetVersion.PY311: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + }, + TargetVersion.PY312: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + Feature.TYPE_PARAMS, + }, + 
TargetVersion.PY313: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + Feature.TYPE_PARAMS, + Feature.TYPE_PARAM_DEFAULTS, + }, + TargetVersion.PY314: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + Feature.TYPE_PARAMS, + Feature.TYPE_PARAM_DEFAULTS, + Feature.UNPARENTHESIZED_EXCEPT_TYPES, + Feature.T_STRINGS, + }, +} + + +def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool: + if not target_versions: + raise ValueError("target_versions must not be empty") + + return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) + + +class Preview(Enum): + """Individual preview style features.""" + + # NOTE: string_processing requires wrap_long_dict_values_in_parens + # for https://github.com/psf/black/issues/3117 to be fixed. + string_processing = auto() + hug_parens_with_braces_and_square_brackets = auto() + wrap_long_dict_values_in_parens = auto() + multiline_string_handling = auto() + always_one_newline_after_import = auto() + fix_fmt_skip_in_one_liners = auto() + standardize_type_comments = auto() + wrap_comprehension_in = auto() + # Remove parentheses around multiple exception types in except and + # except* without as. See PEP 758 for details. + remove_parens_around_except_types = auto() + normalize_cr_newlines = auto() + fix_module_docstring_detection = auto() + fix_type_expansion_split = auto() + remove_parens_from_assignment_lhs = auto() + + +UNSTABLE_FEATURES: set[Preview] = { + # Many issues, see summary in https://github.com/psf/black/issues/4042 + Preview.string_processing, + # See issue #4036 (crash), #4098, #4099 (proposed tweaks) + Preview.hug_parens_with_braces_and_square_brackets, +} + + +class Deprecated(UserWarning): + """Visible deprecation warning.""" + + +_MAX_CACHE_KEY_PART_LENGTH: Final = 32 + + +@dataclass +class Mode: + target_versions: set[TargetVersion] = field(default_factory=set) + line_length: int = DEFAULT_LINE_LENGTH + string_normalization: bool = True + is_pyi: bool = False + is_ipynb: bool = False + skip_source_first_line: bool = False + magic_trailing_comma: bool = True + python_cell_magics: set[str] = field(default_factory=set) + preview: bool = False + unstable: bool = False + enabled_features: set[Preview] = field(default_factory=set) + + def __contains__(self, feature: Preview) -> bool: + """ + Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag. + + In unstable mode, all features are enabled. In preview mode, all features + except those in UNSTABLE_FEATURES are enabled. Any features in + `self.enabled_features` are also enabled. 
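+
+        For example, `Preview.string_processing in Mode(preview=True)` is
+        False because that feature is listed in UNSTABLE_FEATURES, while
+        `Preview.string_processing in Mode(unstable=True)` is True.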
+ """ + if self.unstable: + return True + if feature in self.enabled_features: + return True + return self.preview and feature not in UNSTABLE_FEATURES + + def get_cache_key(self) -> str: + if self.target_versions: + version_str = ",".join( + str(version.value) + for version in sorted(self.target_versions, key=attrgetter("value")) + ) + else: + version_str = "-" + if len(version_str) > _MAX_CACHE_KEY_PART_LENGTH: + version_str = sha256(version_str.encode()).hexdigest()[ + :_MAX_CACHE_KEY_PART_LENGTH + ] + features_and_magics = ( + ",".join(sorted(f.name for f in self.enabled_features)) + + "@" + + ",".join(sorted(self.python_cell_magics)) + ) + if len(features_and_magics) > _MAX_CACHE_KEY_PART_LENGTH: + features_and_magics = sha256(features_and_magics.encode()).hexdigest()[ + :_MAX_CACHE_KEY_PART_LENGTH + ] + parts = [ + version_str, + str(self.line_length), + str(int(self.string_normalization)), + str(int(self.is_pyi)), + str(int(self.is_ipynb)), + str(int(self.skip_source_first_line)), + str(int(self.magic_trailing_comma)), + str(int(self.preview)), + str(int(self.unstable)), + features_and_magics, + ] + return ".".join(parts) + + def __hash__(self) -> int: + return hash(( + frozenset(self.target_versions), + self.line_length, + self.string_normalization, + self.is_pyi, + self.is_ipynb, + self.skip_source_first_line, + self.magic_trailing_comma, + frozenset(self.python_cell_magics), + self.preview, + self.unstable, + frozenset(self.enabled_features), + )) diff --git a/py311/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d4bcc979d826f206229a6a9d643dad782928723d Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/nodes.py b/py311/lib/python3.11/site-packages/black/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..96bc20f20b3674e6679e8a034d9160357bd18c83 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/nodes.py @@ -0,0 +1,1093 @@ +""" +blib2to3 Node/Leaf transformation-related utility functions. 
+""" + +from collections.abc import Iterator +from typing import Final, Generic, Literal, TypeGuard, TypeVar, Union + +from mypy_extensions import mypyc_attr + +from black.cache import CACHE_DIR +from black.mode import Mode, Preview +from black.strings import get_string_prefix, has_triple_quotes +from blib2to3 import pygram +from blib2to3.pgen2 import token +from blib2to3.pytree import NL, Leaf, Node, type_repr + +pygram.initialize(CACHE_DIR) +syms: Final = pygram.python_symbols + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +LeafID = int +NodeType = int + + +WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} +STATEMENT: Final = { + syms.if_stmt, + syms.while_stmt, + syms.for_stmt, + syms.try_stmt, + syms.except_clause, + syms.with_stmt, + syms.funcdef, + syms.classdef, + syms.match_stmt, + syms.case_block, +} +STANDALONE_COMMENT: Final = 153 +token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" +LOGIC_OPERATORS: Final = {"and", "or"} +COMPARATORS: Final = { + token.LESS, + token.GREATER, + token.EQEQUAL, + token.NOTEQUAL, + token.LESSEQUAL, + token.GREATEREQUAL, +} +MATH_OPERATORS: Final = { + token.VBAR, + token.CIRCUMFLEX, + token.AMPER, + token.LEFTSHIFT, + token.RIGHTSHIFT, + token.PLUS, + token.MINUS, + token.STAR, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.AT, + token.TILDE, + token.DOUBLESTAR, +} +STARS: Final = {token.STAR, token.DOUBLESTAR} +VARARGS_SPECIALS: Final = STARS | {token.SLASH} +VARARGS_PARENTS: Final = { + syms.arglist, + syms.argument, # double star in arglist + syms.trailer, # single argument to call + syms.typedargslist, + syms.varargslist, # lambdas +} +UNPACKING_PARENTS: Final = { + syms.atom, # single element of a list or set literal + syms.dictsetmaker, + syms.listmaker, + syms.testlist_gexp, + syms.testlist_star_expr, + syms.subject_expr, + syms.pattern, +} +TEST_DESCENDANTS: Final = { + syms.test, + syms.lambdef, + syms.or_test, + syms.and_test, + syms.not_test, + syms.comparison, + syms.star_expr, + syms.expr, + syms.xor_expr, + syms.and_expr, + syms.shift_expr, + syms.arith_expr, + syms.trailer, + syms.term, + syms.power, + syms.namedexpr_test, +} +TYPED_NAMES: Final = {syms.tname, syms.tname_star} +ASSIGNMENTS: Final = { + "=", + "+=", + "-=", + "*=", + "@=", + "/=", + "%=", + "&=", + "|=", + "^=", + "<<=", + ">>=", + "**=", + "//=", + ":", +} + +IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist} +BRACKET: Final = { + token.LPAR: token.RPAR, + token.LSQB: token.RSQB, + token.LBRACE: token.RBRACE, +} +OPENING_BRACKETS: Final = set(BRACKET.keys()) +CLOSING_BRACKETS: Final = set(BRACKET.values()) +BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS +ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | { + token.COMMA, + STANDALONE_COMMENT, + token.FSTRING_MIDDLE, + token.FSTRING_END, + token.TSTRING_MIDDLE, + token.TSTRING_END, + token.BANG, +} + +RARROW = 55 + + +@mypyc_attr(allow_interpreted_subclasses=True) +class Visitor(Generic[T]): + """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" + + def visit(self, node: LN) -> Iterator[T]: + """Main method to visit `node` and its children. + + It tries to find a `visit_*()` method for the given `node.type`, like + `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. + If no dedicated `visit_*()` method is found, chooses `visit_default()` + instead. + + Then yields objects of type `T` from the selected visitor. 
+ """ + if node.type < 256: + name = token.tok_name[node.type] + else: + name = str(type_repr(node.type)) + # We explicitly branch on whether a visitor exists (instead of + # using self.visit_default as the default arg to getattr) in order + # to save needing to create a bound method object and so mypyc can + # generate a native call to visit_default. + visitf = getattr(self, f"visit_{name}", None) + if visitf: + yield from visitf(node) + else: + yield from self.visit_default(node) + + def visit_default(self, node: LN) -> Iterator[T]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Node): + for child in node.children: + yield from self.visit(child) + + +def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str: + """Return whitespace prefix if needed for the given `leaf`. + + `complex_subscript` signals whether the given leaf is part of a subscription + which has non-trivial arguments, like arithmetic expressions or function calls. + """ + NO: Final[str] = "" + SPACE: Final[str] = " " + DOUBLESPACE: Final[str] = " " + t = leaf.type + p = leaf.parent + v = leaf.value + if t in ALWAYS_NO_SPACE: + return NO + + if t == token.COMMENT: + return DOUBLESPACE + + assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" + if t == token.COLON and p.type not in { + syms.subscript, + syms.subscriptlist, + syms.sliceop, + }: + return NO + + if t == token.LBRACE and p.type in ( + syms.fstring_replacement_field, + syms.tstring_replacement_field, + ): + return NO + + prev = leaf.prev_sibling + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + if t == token.COLON: + if prevp.type == token.COLON: + return NO + + elif prevp.type != token.COMMA and not complex_subscript: + return NO + + return SPACE + + if prevp.type == token.EQUAL: + if prevp.parent: + if prevp.parent.type in { + syms.arglist, + syms.argument, + syms.parameters, + syms.varargslist, + }: + return NO + + elif prevp.parent.type == syms.typedargslist: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using + # that, too. + return prevp.prefix + + elif ( + prevp.type == token.STAR + and parent_type(prevp) == syms.star_expr + and parent_type(prevp.parent) in (syms.subscriptlist, syms.tname_star) + ): + # No space between typevar tuples or unpacking them. 
+ return NO + + elif prevp.type in VARARGS_SPECIALS: + if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): + return NO + + elif prevp.type == token.COLON: + if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: + return SPACE if complex_subscript else NO + + elif ( + prevp.parent + and prevp.parent.type == syms.factor + and prevp.type in MATH_OPERATORS + ): + return NO + + elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: + # no space in decorators + return NO + + elif prev.type in OPENING_BRACKETS: + return NO + + elif prev.type == token.BANG: + return NO + + if p.type in {syms.parameters, syms.arglist}: + # untyped function signatures or calls + if not prev or prev.type != token.COMMA: + return NO + + elif p.type == syms.varargslist: + # lambdas + if prev and prev.type != token.COMMA: + return NO + + elif p.type == syms.typedargslist: + # typed function signatures + if not prev: + return NO + + if t == token.EQUAL: + if prev.type not in TYPED_NAMES: + return NO + + elif prev.type == token.EQUAL: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using that, too. + return prev.prefix + + elif prev.type != token.COMMA: + return NO + + elif p.type in TYPED_NAMES: + # type names + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type != token.COMMA: + return NO + + elif p.type == syms.trailer: + # attributes and calls + if t == token.LPAR or t == token.RPAR: + return NO + + if not prev: + if t == token.DOT or t == token.LSQB: + return NO + + elif prev.type != token.COMMA: + return NO + + elif p.type == syms.argument: + # single argument + if t == token.EQUAL: + return NO + + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.LPAR: + return NO + + elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: + return NO + + elif p.type == syms.decorator: + # decorators + return NO + + elif p.type == syms.dotted_name: + if prev: + return NO + + prevp = preceding_leaf(p) + if not prevp or prevp.type == token.AT or prevp.type == token.DOT: + return NO + + elif p.type == syms.classdef: + if t == token.LPAR: + return NO + + if prev and prev.type == token.LPAR: + return NO + + elif p.type in {syms.subscript, syms.sliceop}: + # indexing + if not prev: + assert p.parent is not None, "subscripts are always parented" + if p.parent.type == syms.subscriptlist: + return SPACE + + return NO + + elif t == token.COLONEQUAL or prev.type == token.COLONEQUAL: + return SPACE + + elif not complex_subscript: + return NO + + elif p.type == syms.atom: + if prev and t == token.DOT: + # dots, but not the first one. 
+            return NO
+
+    elif p.type == syms.dictsetmaker:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
+            return NO
+
+    elif p.type in {syms.factor, syms.star_expr}:
+        # unary ops
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type in OPENING_BRACKETS:
+                return NO
+
+            prevp_parent = prevp.parent
+            assert prevp_parent is not None
+            if prevp.type == token.COLON and prevp_parent.type in {
+                syms.subscript,
+                syms.sliceop,
+            }:
+                return NO
+
+            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
+                return NO
+
+        elif t in {token.NAME, token.NUMBER, token.STRING}:
+            return NO
+
+    elif p.type == syms.import_from:
+        if t == token.DOT:
+            if prev and prev.type == token.DOT:
+                return NO
+
+        elif t == token.NAME:
+            if v == "import":
+                return SPACE
+
+            if prev and prev.type == token.DOT:
+                return NO
+
+    elif p.type == syms.sliceop:
+        return NO
+
+    elif p.type == syms.except_clause:
+        if t == token.STAR:
+            return NO
+
+    return SPACE
+
+
+def make_simple_prefix(nl_count: int, form_feed: bool, empty_line: str = "\n") -> str:
+    """Generate a normalized prefix string."""
+    if form_feed:
+        return (empty_line * (nl_count - 1)) + "\f" + empty_line
+    return empty_line * nl_count
+
+
+def preceding_leaf(node: LN | None) -> Leaf | None:
+    """Return the first leaf that precedes `node`, if any."""
+    while node:
+        res = node.prev_sibling
+        if res:
+            if isinstance(res, Leaf):
+                return res
+
+            try:
+                return list(res.leaves())[-1]
+
+            except IndexError:
+                return None
+
+        node = node.parent
+    return None
+
+
+def prev_siblings_are(node: LN | None, tokens: list[NodeType | None]) -> bool:
+    """Return whether `node` and its previous siblings match types against the
+    provided list of tokens; the provided `node` has its type matched against the
+    last element in the list. `None` can be used as the first element to declare
+    that the start of the list is anchored at the start of its parent's children."""
+    if not tokens:
+        return True
+    if tokens[-1] is None:
+        return node is None
+    if not node:
+        return False
+    if node.type != tokens[-1]:
+        return False
+    return prev_siblings_are(node.prev_sibling, tokens[:-1])
+
+
+def parent_type(node: LN | None) -> NodeType | None:
+    """
+    Returns:
+        @node.parent.type, if @node is not None and has a parent.
+        OR
+        None, otherwise.
+    """
+    if node is None or node.parent is None:
+        return None
+
+    return node.parent.type
+
+
+def child_towards(ancestor: Node, descendant: LN) -> LN | None:
+    """Return the child of `ancestor` that contains `descendant`."""
+    node: LN | None = descendant
+    while node and node.parent != ancestor:
+        node = node.parent
+    return node
+
+
+def replace_child(old_child: LN, new_child: LN) -> None:
+    """
+    Side Effects:
+        * If @old_child.parent is set, replace @old_child with @new_child in
+          @old_child's underlying Node structure.
+        OR
+        * Otherwise, this function does nothing.
+    """
+    parent = old_child.parent
+    if not parent:
+        return
+
+    child_idx = old_child.remove()
+    if child_idx is not None:
+        parent.insert_child(child_idx, new_child)
+
+
+def container_of(leaf: Leaf) -> LN:
+    """Return `leaf` or one of its ancestors that is the topmost container of it.
+
+    By "container" we mean a node where `leaf` is the very first child.
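+
+    The walk stops at `file_input`, when the prefix changes, or when the
+    candidate parent directly follows a bracket, per the checks below.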
+ """ + same_prefix = leaf.prefix + container: LN = leaf + while container: + parent = container.parent + if parent is None: + break + + if parent.children[0].prefix != same_prefix: + break + + if parent.type == syms.file_input: + break + + if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: + break + + container = parent + return container + + +def first_leaf_of(node: LN) -> Leaf | None: + """Returns the first leaf of the node tree.""" + if isinstance(node, Leaf): + return node + if node.children: + return first_leaf_of(node.children[0]) + else: + return None + + +def is_arith_like(node: LN) -> bool: + """Whether node is an arithmetic or a binary arithmetic expression""" + return node.type in { + syms.arith_expr, + syms.shift_expr, + syms.xor_expr, + syms.and_expr, + } + + +def is_docstring(node: NL) -> bool: + if isinstance(node, Leaf): + if node.type != token.STRING: + return False + + prefix = get_string_prefix(node.value) + if set(prefix).intersection("bBfF"): + return False + + if ( + node.parent + and node.parent.type == syms.simple_stmt + and not node.parent.prev_sibling + and node.parent.parent + and node.parent.parent.type == syms.file_input + ): + return True + + if prev_siblings_are( + node.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(node.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. + return True + + return False + + +def is_empty_tuple(node: LN) -> bool: + """Return True if `node` holds an empty tuple.""" + return ( + node.type == syms.atom + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + + +def is_one_tuple(node: LN) -> bool: + """Return True if `node` holds a tuple with one element, with or without parens.""" + if node.type == syms.atom: + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA + + return ( + node.type in IMPLICIT_TUPLE + and len(node.children) == 2 + and node.children[1].type == token.COMMA + ) + + +def is_tuple(node: LN) -> bool: + """Return True if `node` holds a tuple.""" + if node.type != syms.atom: + return False + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return True + + +def is_tuple_containing_walrus(node: LN) -> bool: + """Return True if `node` holds a tuple that contains a walrus operator.""" + if node.type != syms.atom: + return False + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return any(child.type == syms.namedexpr_test for child in gexp.children) + + +def is_tuple_containing_star(node: LN) -> bool: + """Return True if `node` holds a tuple that contains a star operator.""" + if node.type != syms.atom: + return False + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != syms.testlist_gexp: + return False + + return any(child.type == syms.star_expr for child in gexp.children) + + +def is_generator(node: LN) -> bool: + """Return True if `node` holds a generator.""" + if node.type != syms.atom: + return False + gexp = unwrap_singleton_parenthesis(node) + if gexp is None or gexp.type != 
syms.testlist_gexp: + return False + + return any(child.type == syms.old_comp_for for child in gexp.children) + + +def is_one_sequence_between( + opening: Leaf, + closing: Leaf, + leaves: list[Leaf], + brackets: tuple[int, int] = (token.LPAR, token.RPAR), +) -> bool: + """Return True if content between `opening` and `closing` is a one-sequence.""" + if (opening.type, closing.type) != brackets: + return False + + depth = closing.bracket_depth + 1 + for _opening_index, leaf in enumerate(leaves): + if leaf is opening: + break + + else: + raise LookupError("Opening paren not found in `leaves`") + + commas = 0 + _opening_index += 1 + for leaf in leaves[_opening_index:]: + if leaf is closing: + break + + bracket_depth = leaf.bracket_depth + if bracket_depth == depth and leaf.type == token.COMMA: + commas += 1 + if leaf.parent and leaf.parent.type in { + syms.arglist, + syms.typedargslist, + }: + commas += 1 + break + + return commas < 2 + + +def is_walrus_assignment(node: LN) -> bool: + """Return True iff `node` is of the shape ( test := test )""" + inner = unwrap_singleton_parenthesis(node) + return inner is not None and inner.type == syms.namedexpr_test + + +def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool: + """Return True iff `node` is a trailer valid in a simple decorator""" + return node.type == syms.trailer and ( + ( + len(node.children) == 2 + and node.children[0].type == token.DOT + and node.children[1].type == token.NAME + ) + # last trailer can be an argument-less parentheses pair + or ( + last + and len(node.children) == 2 + and node.children[0].type == token.LPAR + and node.children[1].type == token.RPAR + ) + # last trailer can be arguments + or ( + last + and len(node.children) == 3 + and node.children[0].type == token.LPAR + # and node.children[1].type == syms.argument + and node.children[2].type == token.RPAR + ) + ) + + +def is_simple_decorator_expression(node: LN) -> bool: + """Return True iff `node` could be a 'dotted name' decorator + + This function takes the node of the 'namedexpr_test' of the new decorator + grammar and test if it would be valid under the old decorator grammar. + + The old grammar was: decorator: @ dotted_name [arguments] NEWLINE + The new grammar is : decorator: @ namedexpr_test NEWLINE + """ + if node.type == token.NAME: + return True + if node.type == syms.power: + if node.children: + return ( + node.children[0].type == token.NAME + and all(map(is_simple_decorator_trailer, node.children[1:-1])) + and ( + len(node.children) < 2 + or is_simple_decorator_trailer(node.children[-1], last=True) + ) + ) + return False + + +def is_yield(node: LN) -> bool: + """Return True if `node` holds a `yield` or `yield from` expression.""" + if node.type == syms.yield_expr: + return True + + if is_name_token(node) and node.value == "yield": + return True + + if node.type != syms.atom: + return False + + if len(node.children) != 3: + return False + + lpar, expr, rpar = node.children + if lpar.type == token.LPAR and rpar.type == token.RPAR: + return is_yield(expr) + + return False + + +def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool: + """Return True if `leaf` is a star or double star in a vararg or kwarg. + + If `within` includes VARARGS_PARENTS, this applies to function signatures. + If `within` includes UNPACKING_PARENTS, it applies to right hand-side + extended iterable unpacking (PEP 3132) and additional unpacking + generalizations (PEP 448). 
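+
+    For example, the star in `def f(*args)` matches when `within` includes
+    VARARGS_PARENTS, and the star in `[*a, *b]` matches when it includes
+    UNPACKING_PARENTS.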
+ """ + if leaf.type not in VARARGS_SPECIALS or not leaf.parent: + return False + + p = leaf.parent + if p.type == syms.star_expr: + # Star expressions are also used as assignment targets in extended + # iterable unpacking (PEP 3132). See what its parent is instead. + if not p.parent: + return False + + p = p.parent + + return p.type in within + + +def is_fstring(node: Node) -> bool: + """Return True if the node is an f-string""" + return node.type == syms.fstring + + +def fstring_tstring_to_string(node: Node) -> Leaf: + """Converts an fstring or tstring node back to a string node.""" + string_without_prefix = str(node)[len(node.prefix) :] + string_leaf = Leaf(token.STRING, string_without_prefix, prefix=node.prefix) + string_leaf.lineno = node.get_lineno() or 0 + return string_leaf + + +def is_multiline_string(node: LN) -> bool: + """Return True if `leaf` is a multiline string that actually spans many lines.""" + if isinstance(node, Node) and is_fstring(node): + leaf = fstring_tstring_to_string(node) + elif isinstance(node, Leaf): + leaf = node + else: + return False + + return has_triple_quotes(leaf.value) and "\n" in leaf.value + + +def is_parent_function_or_class(node: Node) -> bool: + assert node.type in {syms.suite, syms.simple_stmt} + assert node.parent is not None + # Note this works for suites / simple_stmts in async def as well + return node.parent.type in {syms.funcdef, syms.classdef} + + +def is_function_or_class(node: Node) -> bool: + return node.type in {syms.funcdef, syms.classdef, syms.async_funcdef} + + +def is_stub_suite(node: Node) -> bool: + """Return True if `node` is a suite with a stub body.""" + if node.parent is not None and not is_parent_function_or_class(node): + return False + + # If there is a comment, we want to keep it. + if node.prefix.strip(): + return False + + if ( + len(node.children) != 4 + or node.children[0].type != token.NEWLINE + or node.children[1].type != token.INDENT + or node.children[3].type != token.DEDENT + ): + return False + + if node.children[3].prefix.strip(): + return False + + return is_stub_body(node.children[2]) + + +def is_stub_body(node: LN) -> bool: + """Return True if `node` is a simple statement containing an ellipsis.""" + if not isinstance(node, Node) or node.type != syms.simple_stmt: + return False + + if len(node.children) != 2: + return False + + child = node.children[0] + return ( + not child.prefix.strip() + and child.type == syms.atom + and len(child.children) == 3 + and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) + ) + + +def is_atom_with_invisible_parens(node: LN) -> bool: + """Given a `LN`, determines whether it's an atom `node` with invisible + parens. Useful in dedupe-ing and normalizing parens. 
+ """ + if isinstance(node, Leaf) or node.type != syms.atom: + return False + + first, last = node.children[0], node.children[-1] + return ( + isinstance(first, Leaf) + and first.type == token.LPAR + and first.value == "" + and isinstance(last, Leaf) + and last.type == token.RPAR + and last.value == "" + ) + + +def is_empty_par(leaf: Leaf) -> bool: + return is_empty_lpar(leaf) or is_empty_rpar(leaf) + + +def is_empty_lpar(leaf: Leaf) -> bool: + return leaf.type == token.LPAR and leaf.value == "" + + +def is_empty_rpar(leaf: Leaf) -> bool: + return leaf.type == token.RPAR and leaf.value == "" + + +def is_import(leaf: Leaf) -> bool: + """Return True if the given leaf starts an import statement.""" + p = leaf.parent + t = leaf.type + v = leaf.value + return bool( + t == token.NAME + and ( + (v == "import" and p and p.type == syms.import_name) + or (v == "from" and p and p.type == syms.import_from) + ) + ) + + +def is_with_or_async_with_stmt(leaf: Leaf) -> bool: + """Return True if the given leaf starts a with or async with statement.""" + return bool( + leaf.type == token.NAME + and leaf.value == "with" + and leaf.parent + and leaf.parent.type == syms.with_stmt + ) or bool( + leaf.type == token.ASYNC + and leaf.next_sibling + and leaf.next_sibling.type == syms.with_stmt + ) + + +def is_async_stmt_or_funcdef(leaf: Leaf) -> bool: + """Return True if the given leaf starts an async def/for/with statement. + + Note that `async def` can be either an `async_stmt` or `async_funcdef`, + the latter is used when it has decorators. + """ + return bool( + leaf.type == token.ASYNC + and leaf.parent + and leaf.parent.type in {syms.async_stmt, syms.async_funcdef} + ) + + +def is_type_comment(leaf: Leaf, mode: Mode) -> bool: + """Return True if the given leaf is a type comment. This function should only + be used for general type comments (excluding ignore annotations, which should + use `is_type_ignore_comment`). Note that general type comments are no longer + used in modern version of Python, this function may be deprecated in the future.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_comment_string(v, mode) + + +def is_type_comment_string(value: str, mode: Mode) -> bool: + if Preview.standardize_type_comments in mode: + is_valid = value.startswith("#") and value[1:].lstrip().startswith("type:") + else: + is_valid = value.startswith("# type:") + return is_valid + + +def is_type_ignore_comment(leaf: Leaf, mode: Mode) -> bool: + """Return True if the given leaf is a type comment with ignore annotation.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_ignore_comment_string( + v, mode + ) + + +def is_type_ignore_comment_string(value: str, mode: Mode) -> bool: + """Return True if the given string match with type comment with + ignore annotation.""" + if Preview.standardize_type_comments in mode: + is_valid = is_type_comment_string(value, mode) and value.split(":", 1)[ + 1 + ].lstrip().startswith("ignore") + else: + is_valid = value.startswith("# type: ignore") + + return is_valid + + +def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: + """Wrap `child` in parentheses. + + This replaces `child` with an atom holding the parentheses and the old + child. That requires moving the prefix. + + If `visible` is False, the leaves will be valueless (and thus invisible). 
+ """ + lpar = Leaf(token.LPAR, "(" if visible else "") + rpar = Leaf(token.RPAR, ")" if visible else "") + prefix = child.prefix + child.prefix = "" + index = child.remove() or 0 + new_child = Node(syms.atom, [lpar, child, rpar]) + new_child.prefix = prefix + parent.insert_child(index, new_child) + + +def unwrap_singleton_parenthesis(node: LN) -> LN | None: + """Returns `wrapped` if `node` is of the shape ( wrapped ). + + Parenthesis can be optional. Returns None otherwise""" + if len(node.children) != 3: + return None + + lpar, wrapped, rpar = node.children + if not (lpar.type == token.LPAR and rpar.type == token.RPAR): + return None + + return wrapped + + +def ensure_visible(leaf: Leaf) -> None: + """Make sure parentheses are visible. + + They could be invisible as part of some statements (see + :func:`normalize_invisible_parens` and :func:`visit_import_from`). + """ + if leaf.type == token.LPAR: + leaf.value = "(" + elif leaf.type == token.RPAR: + leaf.value = ")" + + +def is_name_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NAME + + +def is_lpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.LPAR + + +def is_rpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.RPAR + + +def is_number_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NUMBER + + +def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]: + """Returns the type of annotation this leaf is part of, if any.""" + ancestor = leaf.parent + while ancestor is not None: + if ancestor.prev_sibling and ancestor.prev_sibling.type == token.RARROW: + return "return" + if ancestor.parent and ancestor.parent.type == syms.tname: + return "param" + ancestor = ancestor.parent + return None + + +def is_part_of_annotation(leaf: Leaf) -> bool: + """Returns whether this leaf is part of a type annotation.""" + assert leaf.parent is not None + return get_annotation_type(leaf) is not None + + +def first_leaf(node: LN) -> Leaf | None: + """Returns the first leaf of the ancestor node.""" + if isinstance(node, Leaf): + return node + elif not node.children: + return None + else: + return first_leaf(node.children[0]) + + +def last_leaf(node: LN) -> Leaf | None: + """Returns the last leaf of the ancestor node.""" + if isinstance(node, Leaf): + return node + elif not node.children: + return None + else: + return last_leaf(node.children[-1]) + + +def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN: + """Returns the furthest ancestor that has this leaf node as the last leaf.""" + node: LN = leaf + while node.parent and node.parent.children and node is node.parent.children[-1]: + node = node.parent + return node + + +def has_sibling_with_type(node: LN, type: int) -> bool: + # Check previous siblings + sibling = node.prev_sibling + while sibling is not None: + if sibling.type == type: + return True + sibling = sibling.prev_sibling + + # Check next siblings + sibling = node.next_sibling + while sibling is not None: + if sibling.type == type: + return True + sibling = sibling.next_sibling + + return False diff --git a/py311/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2b96f8c5b1038c0632544d8671d53ee1475e8823 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/numerics.py 
b/py311/lib/python3.11/site-packages/black/numerics.py new file mode 100644 index 0000000000000000000000000000000000000000..3040de06fde3033ba333bca7934e8314c516a148 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/numerics.py @@ -0,0 +1,61 @@ +""" +Formatting numeric literals. +""" + +from blib2to3.pytree import Leaf + + +def format_hex(text: str) -> str: + """ + Formats a hexadecimal string like "0x12B3" + """ + before, after = text[:2], text[2:] + return f"{before}{after.upper()}" + + +def format_scientific_notation(text: str) -> str: + """Formats a numeric string utilizing scientific notation""" + before, after = text.split("e") + sign = "" + if after.startswith("-"): + after = after[1:] + sign = "-" + elif after.startswith("+"): + after = after[1:] + before = format_float_or_int_string(before) + return f"{before}e{sign}{after}" + + +def format_complex_number(text: str) -> str: + """Formats a complex string like `10j`""" + number = text[:-1] + suffix = text[-1] + return f"{format_float_or_int_string(number)}{suffix}" + + +def format_float_or_int_string(text: str) -> str: + """Formats a float string like "1.0".""" + if "." not in text: + return text + + before, after = text.split(".") + return f"{before or 0}.{after or 0}" + + +def normalize_numeric_literal(leaf: Leaf) -> None: + """Normalizes numeric (float, int, and complex) literals. + + All letters used in the representation are normalized to lowercase.""" + text = leaf.value.lower() + if text.startswith(("0o", "0b")): + # Leave octal and binary literals alone. + pass + elif text.startswith("0x"): + text = format_hex(text) + elif "e" in text: + text = format_scientific_notation(text) + elif text.endswith("j"): + text = format_complex_number(text) + else: + text = format_float_or_int_string(text) + leaf.value = text diff --git a/py311/lib/python3.11/site-packages/black/output.py b/py311/lib/python3.11/site-packages/black/output.py new file mode 100644 index 0000000000000000000000000000000000000000..76c28c344a903183ab6fa4c85c80a818ee71674a --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/output.py @@ -0,0 +1,122 @@ +"""Nice output for Black. + +The double calls are for patching purposes in tests. 
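+
+For example, `out()` simply forwards to `_out()`, so tests can patch the
+patchable `_out` while callers keep using `out`.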
+""" + +import json +import re +import tempfile +from typing import Any + +from click import echo, style +from mypy_extensions import mypyc_attr + + +@mypyc_attr(patchable=True) +def _out(message: str | None = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "bold" not in styles: + styles["bold"] = True + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def _err(message: str | None = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "fg" not in styles: + styles["fg"] = "red" + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def out(message: str | None = None, nl: bool = True, **styles: Any) -> None: + _out(message, nl=nl, **styles) + + +def err(message: str | None = None, nl: bool = True, **styles: Any) -> None: + _err(message, nl=nl, **styles) + + +def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between each cell in notebooks `a` and `b`.""" + a_nb = json.loads(a) + b_nb = json.loads(b) + diff_lines = [ + diff( + "".join(a_nb["cells"][cell_number]["source"]) + "\n", + "".join(b_nb["cells"][cell_number]["source"]) + "\n", + f"{a_name}:cell_{cell_number}", + f"{b_name}:cell_{cell_number}", + ) + for cell_number, cell in enumerate(a_nb["cells"]) + if cell["cell_type"] == "code" + ] + return "".join(diff_lines) + + +_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))") + + +def _splitlines_no_ff(source: str) -> list[str]: + """Split a string into lines ignoring form feed and other chars. + + This mimics how the Python parser splits source code. + + A simplified version of the function with the same name in Lib/ast.py + """ + result = [match[0] for match in _line_pattern.finditer(source)] + if result[-1] == "": + result.pop(-1) + return result + + +def diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between strings `a` and `b`.""" + import difflib + + a_lines = _splitlines_no_ff(a) + b_lines = _splitlines_no_ff(b) + diff_lines = [] + for line in difflib.unified_diff( + a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5 + ): + # Work around https://bugs.python.org/issue2142 + # See: + # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html + if line[-1] == "\n": + diff_lines.append(line) + else: + diff_lines.append(line + "\n") + diff_lines.append("\\ No newline at end of file\n") + return "".join(diff_lines) + + +def color_diff(contents: str) -> str: + """Inject the ANSI color codes to the diff.""" + lines = contents.split("\n") + for i, line in enumerate(lines): + if line.startswith("+++") or line.startswith("---"): + line = "\033[1m" + line + "\033[0m" # bold, reset + elif line.startswith("@@"): + line = "\033[36m" + line + "\033[0m" # cyan, reset + elif line.startswith("+"): + line = "\033[32m" + line + "\033[0m" # green, reset + elif line.startswith("-"): + line = "\033[31m" + line + "\033[0m" # red, reset + lines[i] = line + return "\n".join(lines) + + +@mypyc_attr(patchable=True) +def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: + """Dump `output` to a temporary file. 
Return path to the file.""" + with tempfile.NamedTemporaryFile( + mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" + ) as f: + for lines in output: + f.write(lines) + if ensure_final_newline and lines and lines[-1] != "\n": + f.write("\n") + return f.name diff --git a/py311/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d7c5d383a757930fb205677d7f74250571608f6d Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/parsing.py b/py311/lib/python3.11/site-packages/black/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..46c6d1d8f701468803c56ec5c47dbec59994dca0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/parsing.py @@ -0,0 +1,260 @@ +""" +Parse Python code and perform AST validation. +""" + +import ast +import sys +import warnings +from collections.abc import Collection, Iterator + +from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature +from black.nodes import syms +from blib2to3 import pygram +from blib2to3.pgen2 import driver +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.parse import ParseError +from blib2to3.pgen2.tokenize import TokenError +from blib2to3.pytree import Leaf, Node + + +class InvalidInput(ValueError): + """Raised when input source code fails all parse attempts.""" + + +def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. + return [ + # Python 3.7-3.9 + pygram.python_grammar_async_keywords, + # Python 3.0-3.6 + pygram.python_grammar, + # Python 3.10+ + pygram.python_grammar_soft_keywords, + ] + + grammars = [] + # If we have to parse both, try to parse async as a keyword first + if not supports_feature( + target_versions, Feature.ASYNC_IDENTIFIERS + ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING): + # Python 3.7-3.9 + grammars.append(pygram.python_grammar_async_keywords) + if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): + # Python 3.0-3.6 + grammars.append(pygram.python_grammar) + if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions): + # Python 3.10+ + grammars.append(pygram.python_grammar_soft_keywords) + + # At least one of the above branches must have been taken, because every Python + # version has exactly one of the two 'ASYNC_*' flags + return grammars + + +def lib2to3_parse( + src_txt: str, target_versions: Collection[TargetVersion] = () +) -> Node: + """Given a string with source, return the lib2to3 Node.""" + if not src_txt.endswith("\n"): + src_txt += "\n" + + grammars = get_grammars(set(target_versions)) + if target_versions: + max_tv = max(target_versions, key=lambda tv: tv.value) + tv_str = f" for target version {max_tv.pretty()}" + else: + tv_str = "" + + errors = {} + for grammar in grammars: + drv = driver.Driver(grammar) + try: + result = drv.parse_string(src_txt, False) + break + + except ParseError as pe: + lineno, column = pe.context[1] + lines = src_txt.splitlines() + try: + faulty_line = lines[lineno - 1] + except IndexError: + faulty_line = "" + errors[grammar.version] = InvalidInput( + f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}" + ) + + except TokenError as te: + # In 
edge cases these are raised; and typically don't have a "faulty_line". + lineno, column = te.args[1] + errors[grammar.version] = InvalidInput( + f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}" + ) + + else: + # Choose the latest version when raising the actual parsing error. + assert len(errors) >= 1 + exc = errors[max(errors)] + raise exc from None + + if isinstance(result, Leaf): + result = Node(syms.file_input, [result]) + return result + + +def matches_grammar(src_txt: str, grammar: Grammar) -> bool: + drv = driver.Driver(grammar) + try: + drv.parse_string(src_txt, False) + except (ParseError, TokenError, IndentationError): + return False + else: + return True + + +def lib2to3_unparse(node: Node) -> str: + """Given a lib2to3 node, return its string representation.""" + code = str(node) + return code + + +class ASTSafetyError(Exception): + """Raised when Black's generated code is not equivalent to the old AST.""" + + +def _parse_single_version( + src: str, version: tuple[int, int], *, type_comments: bool +) -> ast.AST: + filename = "" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", SyntaxWarning) + warnings.simplefilter("ignore", DeprecationWarning) + return ast.parse( + src, filename, feature_version=version, type_comments=type_comments + ) + + +def parse_ast(src: str) -> ast.AST: + # TODO: support Python 4+ ;) + versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)] + + first_error = "" + for version in sorted(versions, reverse=True): + try: + return _parse_single_version(src, version, type_comments=True) + except SyntaxError as e: + if not first_error: + first_error = str(e) + + # Try to parse without type comments + for version in sorted(versions, reverse=True): + try: + return _parse_single_version(src, version, type_comments=False) + except SyntaxError: + pass + + raise SyntaxError(first_error) + + +def _normalize(lineend: str, value: str) -> str: + # To normalize, we strip any leading and trailing space from + # each line... + stripped: list[str] = [i.strip() for i in value.splitlines()] + normalized = lineend.join(stripped) + # ...and remove any blank lines at the beginning and end of + # the whole string + return normalized.strip() + + +def stringify_ast(node: ast.AST) -> Iterator[str]: + """Simple visitor generating strings to compare ASTs by content.""" + return _stringify_ast(node, []) + + +def _stringify_ast_with_new_parent( + node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST +) -> Iterator[str]: + parent_stack.append(new_parent) + yield from _stringify_ast(node, parent_stack) + parent_stack.pop() + + +def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]: + if ( + isinstance(node, ast.Constant) + and isinstance(node.value, str) + and node.kind == "u" + ): + # It's a quirk of history that we strip the u prefix over here. We used to + # rewrite the AST nodes for Python version compatibility and we never copied + # over the kind + node.kind = None + + yield f"{' ' * len(parent_stack)}{node.__class__.__name__}(" + + for field in sorted(node._fields): + # TypeIgnore has only one field 'lineno' which breaks this comparison + if isinstance(node, ast.TypeIgnore): + break + + try: + value: object = getattr(node, field) + except AttributeError: + continue + + yield f"{' ' * (len(parent_stack) + 1)}{field}=" + + if isinstance(value, list): + for item in value: + # Ignore nested tuples within del statements, because we may insert + # parentheses and they change the AST. 
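+                    # ("del a, b" yields two Delete targets while "del (a, b)"
+                    # yields a single Tuple target, so nested tuples are
+                    # flattened via _unwrap_tuples before comparing.)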
+ if ( + field == "targets" + and isinstance(node, ast.Delete) + and isinstance(item, ast.Tuple) + ): + for elt in _unwrap_tuples(item): + yield from _stringify_ast_with_new_parent( + elt, parent_stack, node + ) + + elif isinstance(item, ast.AST): + yield from _stringify_ast_with_new_parent(item, parent_stack, node) + + elif isinstance(value, ast.AST): + yield from _stringify_ast_with_new_parent(value, parent_stack, node) + + else: + normalized: object + if ( + isinstance(node, ast.Constant) + and field == "value" + and isinstance(value, str) + and len(parent_stack) >= 2 + # Any standalone string, ideally this would + # exactly match black.nodes.is_docstring + and isinstance(parent_stack[-1], ast.Expr) + ): + # Constant strings may be indented across newlines, if they are + # docstrings; fold spaces after newlines when comparing. Similarly, + # trailing and leading space may be removed. + normalized = _normalize("\n", value) + elif field == "type_comment" and isinstance(value, str): + # Trailing whitespace in type comments is removed. + normalized = value.rstrip() + else: + normalized = value + yield ( + f"{' ' * (len(parent_stack) + 1)}{normalized!r}, #" + f" {value.__class__.__name__}" + ) + + yield f"{' ' * len(parent_stack)}) # /{node.__class__.__name__}" + + +def _unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]: + for elt in node.elts: + if isinstance(elt, ast.Tuple): + yield from _unwrap_tuples(elt) + else: + yield elt diff --git a/py311/lib/python3.11/site-packages/black/py.typed b/py311/lib/python3.11/site-packages/black/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/black/ranges.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/ranges.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d2b8e1858d2347f5ec2cbcd1cfe519ee927978d4 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/ranges.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/ranges.py b/py311/lib/python3.11/site-packages/black/ranges.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e003db83f91c30cbc4f61bfc17cf812556c2f9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/ranges.py @@ -0,0 +1,534 @@ +"""Functions related to Black's formatting by line ranges feature.""" + +import difflib +from collections.abc import Collection, Iterator, Sequence +from dataclasses import dataclass + +from black.nodes import ( + LN, + STANDALONE_COMMENT, + Leaf, + Node, + Visitor, + first_leaf, + furthest_ancestor_with_last_leaf, + last_leaf, + syms, +) +from blib2to3.pgen2.token import ASYNC, NEWLINE + + +def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]: + lines: list[tuple[int, int]] = [] + for lines_str in line_ranges: + parts = lines_str.split("-") + if len(parts) != 2: + raise ValueError( + "Incorrect --line-ranges format, expect 'START-END', found" + f" {lines_str!r}" + ) + try: + start = int(parts[0]) + end = int(parts[1]) + except ValueError: + raise ValueError( + "Incorrect --line-ranges value, expect integer ranges, found" + f" {lines_str!r}" + ) from None + else: + lines.append((start, end)) + return lines + + +def is_valid_line_range(lines: tuple[int, int]) -> bool: + """Returns whether the line range is valid.""" + return not lines or lines[0] <= lines[1] + + +def sanitized_lines( + lines: Collection[tuple[int, 
int]], src_contents: str
+) -> Collection[tuple[int, int]]:
+    """Returns the valid line ranges for the given source.
+
+    This removes ranges that are entirely outside the valid lines.
+
+    Other ranges are normalized so that the start values are at least 1 and the
+    end values are at most the (1-based) index of the last source line.
+    """
+    if not src_contents:
+        return []
+    good_lines = []
+    src_line_count = src_contents.count("\n")
+    if not src_contents.endswith("\n"):
+        src_line_count += 1
+    for start, end in lines:
+        if start > src_line_count:
+            continue
+        # line-ranges are 1-based
+        start = max(start, 1)
+        if end < start:
+            continue
+        end = min(end, src_line_count)
+        good_lines.append((start, end))
+    return good_lines
+
+
+def adjusted_lines(
+    lines: Collection[tuple[int, int]],
+    original_source: str,
+    modified_source: str,
+) -> list[tuple[int, int]]:
+    """Returns the adjusted line ranges based on edits from the original code.
+
+    This computes the new line ranges by diffing original_source and
+    modified_source, and adjusts each range based on how it overlaps with
+    the diffs.
+
+    Note the diff can contain lines outside of the original line ranges. This can
+    happen when formatting must also touch adjacent lines to keep the result
+    locally consistent. For example:
+
+    1. def my_func(arg1, arg2,
+    2.             arg3,):
+    3.   pass
+
+    If the requested range is 2-2, Black can't simply reformat line 2; it also
+    has to reformat line 1:
+
+    1. def my_func(
+    2.     arg1,
+    3.     arg2,
+    4.     arg3,
+    5. ):
+    6.   pass
+
+    In this case, we will expand the line ranges to also include the whole diff
+    block.
+
+    Args:
+      lines: a collection of line ranges.
+      original_source: the original source.
+      modified_source: the modified source.
+    """
+    lines_mappings = _calculate_lines_mappings(original_source, modified_source)
+
+    new_lines = []
+    # Keep an index of the current search. Since the lines and lines_mappings are
+    # sorted, this makes the search complexity linear.
+    current_mapping_index = 0
+    for start, end in sorted(lines):
+        start_mapping_index = _find_lines_mapping_index(
+            start,
+            lines_mappings,
+            current_mapping_index,
+        )
+        end_mapping_index = _find_lines_mapping_index(
+            end,
+            lines_mappings,
+            start_mapping_index,
+        )
+        current_mapping_index = start_mapping_index
+        if start_mapping_index >= len(lines_mappings) or end_mapping_index >= len(
+            lines_mappings
+        ):
+            # Protect against invalid inputs.
+            continue
+        start_mapping = lines_mappings[start_mapping_index]
+        end_mapping = lines_mappings[end_mapping_index]
+        if start_mapping.is_changed_block:
+            # When the line falls into a changed block, expand to the whole block.
+            new_start = start_mapping.modified_start
+        else:
+            new_start = (
+                start - start_mapping.original_start + start_mapping.modified_start
+            )
+        if end_mapping.is_changed_block:
+            # When the line falls into a changed block, expand to the whole block.
+            new_end = end_mapping.modified_end
+        else:
+            new_end = end - end_mapping.original_start + end_mapping.modified_start
+        new_range = (new_start, new_end)
+        if is_valid_line_range(new_range):
+            new_lines.append(new_range)
+    return new_lines
+
+
+def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
+    r"""Converts unchanged lines to STANDALONE_COMMENT.
+
+    The idea is similar to how `# fmt: on/off` is implemented. It also converts
+    the nodes between those markers into a single `STANDALONE_COMMENT` leaf node
+    with the unformatted code as its value.
`STANDALONE_COMMENT` is a "fake" token + that will be formatted as-is with its prefix normalized. + + Here we perform two passes: + + 1. Visit the top-level statements, and convert them to a single + `STANDALONE_COMMENT` when unchanged. This speeds up formatting when some + of the top-level statements aren't changed. + 2. Convert unchanged "unwrapped lines" to `STANDALONE_COMMENT` nodes line by + line. "unwrapped lines" are divided by the `NEWLINE` token. e.g. a + multi-line statement is *one* "unwrapped line" that ends with `NEWLINE`, + even though this statement itself can span multiple lines, and the + tokenizer only sees the last '\n' as the `NEWLINE` token. + + NOTE: During pass (2), comment prefixes and indentations are ALWAYS + normalized even when the lines aren't changed. This is fixable by moving + more formatting to pass (1). However, it's hard to get it correct when + incorrect indentations are used. So we defer this to future optimizations. + """ + lines_set: set[int] = set() + for start, end in lines: + lines_set.update(range(start, end + 1)) + visitor = _TopLevelStatementsVisitor(lines_set) + _ = list(visitor.visit(src_node)) # Consume all results. + _convert_unchanged_line_by_line(src_node, lines_set) + + +def _contains_standalone_comment(node: LN) -> bool: + if isinstance(node, Leaf): + return node.type == STANDALONE_COMMENT + else: + for child in node.children: + if _contains_standalone_comment(child): + return True + return False + + +class _TopLevelStatementsVisitor(Visitor[None]): + """ + A node visitor that converts unchanged top-level statements to + STANDALONE_COMMENT. + + This is used in addition to _convert_unchanged_line_by_line, to + speed up formatting when there are unchanged top-level + classes/functions/statements. + """ + + def __init__(self, lines_set: set[int]): + self._lines_set = lines_set + + def visit_simple_stmt(self, node: Node) -> Iterator[None]: + # This is only called for top-level statements, since `visit_suite` + # won't visit its children nodes. + yield from [] + newline_leaf = last_leaf(node) + if not newline_leaf: + return + assert ( + newline_leaf.type == NEWLINE + ), f"Unexpectedly found leaf.type={newline_leaf.type}" + # We need to find the furthest ancestor with the NEWLINE as the last + # leaf, since a `suite` can simply be a `simple_stmt` when it puts + # its body on the same line. Example: `if cond: pass`. + ancestor = furthest_ancestor_with_last_leaf(newline_leaf) + if not _get_line_range(ancestor).intersection(self._lines_set): + _convert_node_to_standalone_comment(ancestor) + + def visit_suite(self, node: Node) -> Iterator[None]: + yield from [] + # If there is a STANDALONE_COMMENT node, it means parts of the node tree + # have fmt on/off/skip markers. Those STANDALONE_COMMENT nodes can't + # be simply converted by calling str(node). So we just don't convert + # here. + if _contains_standalone_comment(node): + return + # Find the semantic parent of this suite. For `async_stmt` and + # `async_funcdef`, the ASYNC token is defined on a separate level by the + # grammar. 
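+        # (Illustrative: in "async def f(): ...", ASYNC is the previous
+        # sibling of the funcdef, so we step up one more parent below.)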
+        semantic_parent = node.parent
+        if semantic_parent is not None:
+            if (
+                semantic_parent.prev_sibling is not None
+                and semantic_parent.prev_sibling.type == ASYNC
+            ):
+                semantic_parent = semantic_parent.parent
+        if semantic_parent is not None and not _get_line_range(
+            semantic_parent
+        ).intersection(self._lines_set):
+            _convert_node_to_standalone_comment(semantic_parent)
+
+
+def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
+    """Converts unchanged lines to STANDALONE_COMMENT line by line."""
+    for leaf in node.leaves():
+        if leaf.type != NEWLINE:
+            # We only consider "unwrapped lines", which are divided by the NEWLINE
+            # token.
+            continue
+        if leaf.parent and leaf.parent.type == syms.match_stmt:
+            # The `match_stmt` node is defined as:
+            #   match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
+            # Here we need to check `subject_expr`. The `case_block+` will be
+            # checked by their own NEWLINEs.
+            nodes_to_ignore: list[LN] = []
+            prev_sibling = leaf.prev_sibling
+            while prev_sibling:
+                nodes_to_ignore.insert(0, prev_sibling)
+                prev_sibling = prev_sibling.prev_sibling
+            if not _get_line_range(nodes_to_ignore).intersection(lines_set):
+                _convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
+        elif leaf.parent and leaf.parent.type == syms.suite:
+            # The `suite` node is defined as:
+            #   suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+            # We will check `simple_stmt` and `stmt+` separately against the lines set.
+            parent_sibling = leaf.parent.prev_sibling
+            nodes_to_ignore = []
+            while parent_sibling and parent_sibling.type != syms.suite:
+                # NOTE: Multiple suite nodes can exist as siblings in e.g. `if_stmt`.
+                nodes_to_ignore.insert(0, parent_sibling)
+                parent_sibling = parent_sibling.prev_sibling
+            # Special case for `async_stmt` and `async_funcdef` where the ASYNC
+            # token is on the grandparent node.
+            grandparent = leaf.parent.parent
+            if (
+                grandparent is not None
+                and grandparent.prev_sibling is not None
+                and grandparent.prev_sibling.type == ASYNC
+            ):
+                nodes_to_ignore.insert(0, grandparent.prev_sibling)
+            if not _get_line_range(nodes_to_ignore).intersection(lines_set):
+                _convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
+        else:
+            ancestor = furthest_ancestor_with_last_leaf(leaf)
+            # Consider multiple decorators as a whole block, as their
+            # newlines have different behaviors than the rest of the grammar.
+            if (
+                ancestor.type == syms.decorator
+                and ancestor.parent
+                and ancestor.parent.type == syms.decorators
+            ):
+                ancestor = ancestor.parent
+            if not _get_line_range(ancestor).intersection(lines_set):
+                _convert_node_to_standalone_comment(ancestor)
+
+
+def _convert_node_to_standalone_comment(node: LN) -> None:
+    """Convert node to STANDALONE_COMMENT by modifying the tree inline."""
+    parent = node.parent
+    if not parent:
+        return
+    first = first_leaf(node)
+    last = last_leaf(node)
+    if not first or not last:
+        return
+    if first is last:
+        # This can happen in the following edge cases:
+        # 1. A block of `# fmt: off/on` code where the `# fmt: on` is placed
+        #    at the end of the last line instead of on a new line.
+        # 2. A single backslash on its own line followed by a comment line.
+        # Ideally we don't want to format them when not requested, but fixing
+        # isn't easy. These cases are also badly formatted code, so it isn't
+        # too bad that we reformat them.
+        return
+    # The prefix contains comments and indentation whitespaces. They are
+    # reformatted according to the correct indentation level.
+ # This also means the indentation will be changed on the unchanged lines, and + # this is actually required to not break incremental reformatting. + prefix = first.prefix + first.prefix = "" + index = node.remove() + if index is not None: + # Because of the special handling of multiple decorators, if the decorated + # item is a single line then there will be a missing newline between the + # decorator and item, so add it back. This doesn't affect any other case + # since a decorated item with a newline would hit the earlier suite case + # in _convert_unchanged_line_by_line that correctly handles the newlines. + if node.type == syms.decorated: + # A leaf of type decorated wouldn't make sense, since it should always + # have at least the decorator + the decorated item, so if this assert + # hits that means there's a problem in the parser. + assert isinstance(node, Node) + # 1 will always be the correct index since before this function is + # called all the decorators are collapsed into a single leaf + node.insert_child(1, Leaf(NEWLINE, "\n")) + # Remove the '\n', as STANDALONE_COMMENT will have '\n' appended when + # generating the formatted code. + value = str(node)[:-1] + parent.insert_child( + index, + Leaf( + STANDALONE_COMMENT, + value, + prefix=prefix, + fmt_pass_converted_first_leaf=first, + ), + ) + + +def _convert_nodes_to_standalone_comment(nodes: Sequence[LN], *, newline: Leaf) -> None: + """Convert nodes to STANDALONE_COMMENT by modifying the tree inline.""" + if not nodes: + return + parent = nodes[0].parent + first = first_leaf(nodes[0]) + if not parent or not first: + return + prefix = first.prefix + first.prefix = "" + value = "".join(str(node) for node in nodes) + # The prefix comment on the NEWLINE leaf is the trailing comment of the statement. + if newline.prefix: + value += newline.prefix + newline.prefix = "" + index = nodes[0].remove() + for node in nodes[1:]: + node.remove() + if index is not None: + parent.insert_child( + index, + Leaf( + STANDALONE_COMMENT, + value, + prefix=prefix, + fmt_pass_converted_first_leaf=first, + ), + ) + + +def _leaf_line_end(leaf: Leaf) -> int: + """Returns the line number of the leaf node's last line.""" + if leaf.type == NEWLINE: + return leaf.lineno + else: + # Leaf nodes like multiline strings can occupy multiple lines. + return leaf.lineno + str(leaf).count("\n") + + +def _get_line_range(node_or_nodes: LN | list[LN]) -> set[int]: + """Returns the line range of this node or list of nodes.""" + if isinstance(node_or_nodes, list): + nodes = node_or_nodes + if not nodes: + return set() + first = first_leaf(nodes[0]) + last = last_leaf(nodes[-1]) + if first and last: + line_start = first.lineno + line_end = _leaf_line_end(last) + return set(range(line_start, line_end + 1)) + else: + return set() + else: + node = node_or_nodes + if isinstance(node, Leaf): + return set(range(node.lineno, _leaf_line_end(node) + 1)) + else: + first = first_leaf(node) + last = last_leaf(node) + if first and last: + return set(range(first.lineno, _leaf_line_end(last) + 1)) + else: + return set() + + +@dataclass +class _LinesMapping: + """1-based lines mapping from original source to modified source. + + Lines [original_start, original_end] from original source + are mapped to [modified_start, modified_end]. + + The ranges are inclusive on both ends. + """ + + original_start: int + original_end: int + modified_start: int + modified_end: int + # Whether this range corresponds to a changed block, or an unchanged block. 
+    is_changed_block: bool
+
+
+def _calculate_lines_mappings(
+    original_source: str,
+    modified_source: str,
+) -> Sequence[_LinesMapping]:
+    """Returns a sequence of _LinesMapping by diffing the sources.
+
+    For example, given the following diff:
+        import re
+      - def func(arg1,
+      -   arg2, arg3):
+      + def func(arg1, arg2, arg3):
+          pass
+    It returns the following mappings:
+      original -> modified
+       (1, 1)  ->  (1, 1), is_changed_block=False (the "import re" line)
+       (2, 3)  ->  (2, 2), is_changed_block=True (the diff)
+       (4, 4)  ->  (3, 3), is_changed_block=False (the "pass" line)
+
+    You can think of this visually as if it brings up a side-by-side diff, and tries
+    to map the line ranges from the left side to the right side:
+
+      (1, 1)->(1, 1) 1. import re          1. import re
+      (2, 3)->(2, 2) 2. def func(arg1,     2. def func(arg1, arg2, arg3):
+                     3.   arg2, arg3):
+      (4, 4)->(3, 3) 4.   pass             3.   pass
+
+    Args:
+      original_source: the original source.
+      modified_source: the modified source.
+    """
+    matcher = difflib.SequenceMatcher(
+        None,
+        original_source.splitlines(keepends=True),
+        modified_source.splitlines(keepends=True),
+    )
+    matching_blocks = matcher.get_matching_blocks()
+    lines_mappings: list[_LinesMapping] = []
+    # matching_blocks is a sequence of "same block of code ranges", see
+    # https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
+    # Each block corresponds to a _LinesMapping with is_changed_block=False,
+    # and the ranges between two blocks correspond to a _LinesMapping with
+    # is_changed_block=True.
+    # NOTE: matching_blocks is 0-based, but _LinesMapping is 1-based.
+    for i, block in enumerate(matching_blocks):
+        if i == 0:
+            if block.a != 0 or block.b != 0:
+                lines_mappings.append(
+                    _LinesMapping(
+                        original_start=1,
+                        original_end=block.a,
+                        modified_start=1,
+                        modified_end=block.b,
+                        is_changed_block=False,
+                    )
+                )
+        else:
+            previous_block = matching_blocks[i - 1]
+            lines_mappings.append(
+                _LinesMapping(
+                    original_start=previous_block.a + previous_block.size + 1,
+                    original_end=block.a,
+                    modified_start=previous_block.b + previous_block.size + 1,
+                    modified_end=block.b,
+                    is_changed_block=True,
+                )
+            )
+        if i < len(matching_blocks) - 1:
+            lines_mappings.append(
+                _LinesMapping(
+                    original_start=block.a + 1,
+                    original_end=block.a + block.size,
+                    modified_start=block.b + 1,
+                    modified_end=block.b + block.size,
+                    is_changed_block=False,
+                )
+            )
+    return lines_mappings
+
+
+def _find_lines_mapping_index(
+    original_line: int,
+    lines_mappings: Sequence[_LinesMapping],
+    start_index: int,
+) -> int:
+    """Returns the index of the mapping in lines_mappings that contains
+    original_line, searching forward from start_index."""
+    index = start_index
+    while index < len(lines_mappings):
+        mapping = lines_mappings[index]
+        if mapping.original_start <= original_line <= mapping.original_end:
+            return index
+        index += 1
+    return index
diff --git a/py311/lib/python3.11/site-packages/black/report.py b/py311/lib/python3.11/site-packages/black/report.py
new file mode 100644
index 0000000000000000000000000000000000000000..89899f2f38996f309f79dc1225e24bfa8c69767a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/black/report.py
@@ -0,0 +1,107 @@
+"""
+Summarize Black runs to users.
+""" + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path + +from click import style + +from black.output import err, out + + +class Changed(Enum): + NO = 0 + CACHED = 1 + YES = 2 + + +class NothingChanged(UserWarning): + """Raised when reformatted code is the same as source.""" + + +@dataclass +class Report: + """Provides a reformatting counter. Can be rendered with `str(report)`.""" + + check: bool = False + diff: bool = False + quiet: bool = False + verbose: bool = False + change_count: int = 0 + same_count: int = 0 + failure_count: int = 0 + + def done(self, src: Path, changed: Changed) -> None: + """Increment the counter for successful reformatting. Write out a message.""" + if changed is Changed.YES: + reformatted = "would reformat" if self.check or self.diff else "reformatted" + if self.verbose or not self.quiet: + out(f"{reformatted} {src}") + self.change_count += 1 + else: + if self.verbose: + if changed is Changed.NO: + msg = f"{src} already well formatted, good job." + else: + msg = f"{src} wasn't modified on disk since last run." + out(msg, bold=False) + self.same_count += 1 + + def failed(self, src: Path, message: str) -> None: + """Increment the counter for failed reformatting. Write out a message.""" + err(f"error: cannot format {src}: {message}") + self.failure_count += 1 + + def path_ignored(self, path: Path, message: str) -> None: + if self.verbose: + out(f"{path} ignored: {message}", bold=False) + + @property + def return_code(self) -> int: + """Return the exit code that the app should use. + + This considers the current state of changed files and failures: + - if there were any failures, return 123; + - if any files were changed and --check is being used, return 1; + - otherwise return 0. + """ + # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with + # 126 we have special return codes reserved by the shell. + if self.failure_count: + return 123 + + elif self.change_count and self.check: + return 1 + + return 0 + + def __str__(self) -> str: + """Render a color report of the current state. + + Use `click.unstyle` to remove colors. + """ + if self.check or self.diff: + reformatted = "would be reformatted" + unchanged = "would be left unchanged" + failed = "would fail to reformat" + else: + reformatted = "reformatted" + unchanged = "left unchanged" + failed = "failed to reformat" + report = [] + if self.change_count: + s = "s" if self.change_count > 1 else "" + report.append( + style(f"{self.change_count} file{s} ", bold=True, fg="blue") + + style(f"{reformatted}", bold=True) + ) + + if self.same_count: + s = "s" if self.same_count > 1 else "" + report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged) + if self.failure_count: + s = "s" if self.failure_count > 1 else "" + report.append(style(f"{self.failure_count} file{s} {failed}", fg="red")) + return ", ".join(report) + "." 
diff --git a/py311/lib/python3.11/site-packages/black/resources/__init__.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/resources/__init__.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b8417ed89fe0bfecdb8ea0c651622faee23e4fe2 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/resources/__init__.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/resources/__init__.py b/py311/lib/python3.11/site-packages/black/resources/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/black/resources/black.schema.json b/py311/lib/python3.11/site-packages/black/resources/black.schema.json new file mode 100644 index 0000000000000000000000000000000000000000..bed70a4bb22cad8797c8d1735ce575630ad8c3e4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/resources/black.schema.json @@ -0,0 +1,161 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json", + "$comment": "tool.black table in pyproject.toml", + "type": "object", + "additionalProperties": false, + "properties": { + "code": { + "type": "string", + "description": "Format the code passed in as a string." + }, + "line-length": { + "type": "integer", + "description": "How many characters per line to allow.", + "default": 88 + }, + "target-version": { + "type": "array", + "items": { + "enum": [ + "py33", + "py34", + "py35", + "py36", + "py37", + "py38", + "py39", + "py310", + "py311", + "py312", + "py313", + "py314" + ] + }, + "description": "Python versions that should be supported by Black's output. You should include all versions that your code supports. By default, Black will infer target versions from the project metadata in pyproject.toml. If this does not yield conclusive results, Black will use per-file auto-detection." + }, + "pyi": { + "type": "boolean", + "description": "Format all input files like typing stubs regardless of file extension. This is useful when piping source on standard input.", + "default": false + }, + "ipynb": { + "type": "boolean", + "description": "Format all input files like Jupyter Notebooks regardless of file extension. This is useful when piping source on standard input.", + "default": false + }, + "python-cell-magics": { + "type": "array", + "items": { + "type": "string" + }, + "description": "When processing Jupyter Notebooks, add the given magic to the list of known python-magics (capture, prun, pypy, python, python3, time, timeit). Useful for formatting cells with custom python magics." 
+ }, + "skip-source-first-line": { + "type": "boolean", + "description": "Skip the first line of the source code.", + "default": false + }, + "skip-string-normalization": { + "type": "boolean", + "description": "Don't normalize string quotes or prefixes.", + "default": false + }, + "skip-magic-trailing-comma": { + "type": "boolean", + "description": "Don't use trailing commas as a reason to split lines.", + "default": false + }, + "preview": { + "type": "boolean", + "description": "Enable potentially disruptive style changes that may be added to Black's main functionality in the next major release.", + "default": false + }, + "unstable": { + "type": "boolean", + "description": "Enable potentially disruptive style changes that have known bugs or are not currently expected to make it into the stable style Black's next major release. Implies --preview.", + "default": false + }, + "enable-unstable-feature": { + "type": "array", + "items": { + "enum": [ + "string_processing", + "hug_parens_with_braces_and_square_brackets", + "wrap_long_dict_values_in_parens", + "multiline_string_handling", + "always_one_newline_after_import", + "fix_fmt_skip_in_one_liners", + "standardize_type_comments", + "wrap_comprehension_in", + "remove_parens_around_except_types", + "normalize_cr_newlines", + "fix_module_docstring_detection", + "fix_type_expansion_split", + "remove_parens_from_assignment_lhs" + ] + }, + "description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features." + }, + "check": { + "type": "boolean", + "description": "Don't write the files back, just return the status. Return code 0 means nothing would change. Return code 1 means some files would be reformatted. Return code 123 means there was an internal error.", + "default": false + }, + "diff": { + "type": "boolean", + "description": "Don't write the files back, just output a diff to indicate what changes Black would've made. They are printed to stdout so capturing them is simple.", + "default": false + }, + "color": { + "type": "boolean", + "description": "Show (or do not show) colored diff. Only applies when --diff is given.", + "default": false + }, + "fast": { + "type": "boolean", + "description": "By default, Black performs an AST safety check after formatting your code. The --fast flag turns off this check and the --safe flag explicitly enables it. [default: --safe]", + "default": false + }, + "required-version": { + "type": "string", + "description": "Require a specific version of Black to be running. This is useful for ensuring that all contributors to your project are using the same version, because different versions of Black may format code a little differently. This option can be set in a configuration file for consistent results across environments." + }, + "exclude": { + "type": "string", + "description": "A regular expression that matches files and directories that should be excluded on recursive searches. An empty value means no paths are excluded. Use forward slashes for directories on all platforms (Windows, too). By default, Black also ignores all paths listed in .gitignore. Changing this value will override all default exclusions. 
[default: /(\\.direnv|\\.eggs|\\.git|\\.hg|\\.ipynb_checkpoints|\\.mypy_cache|\\.nox|\\.pytest_cache|\\.ruff_cache|\\.tox|\\.svn|\\.venv|\\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/]"
+    },
+    "extend-exclude": {
+      "type": "string",
+      "description": "Like --exclude, but adds additional files and directories on top of the default values instead of overriding them."
+    },
+    "force-exclude": {
+      "type": "string",
+      "description": "Like --exclude, but files and directories matching this regex will be excluded even when they are passed explicitly as arguments. This is useful when invoking Black programmatically on changed files, such as in a pre-commit hook or editor plugin."
+    },
+    "include": {
+      "type": "string",
+      "description": "A regular expression that matches files and directories that should be included on recursive searches. An empty value means all files are included regardless of the name. Use forward slashes for directories on all platforms (Windows, too). Overrides all exclusions, including from .gitignore and command line options.",
+      "default": "(\\.pyi?|\\.ipynb)$"
+    },
+    "workers": {
+      "type": "integer",
+      "description": "When Black formats multiple files, it may use a process pool to speed up formatting. This option controls the number of parallel workers. This can also be specified via the BLACK_NUM_WORKERS environment variable. Defaults to the number of CPUs in the system."
+    },
+    "quiet": {
+      "type": "boolean",
+      "description": "Stop emitting all non-critical output. Error messages will still be emitted (which can be silenced by 2>/dev/null).",
+      "default": false
+    },
+    "verbose": {
+      "type": "boolean",
+      "description": "Emit messages about files that were not changed or were ignored due to exclusion patterns. If Black is using a configuration file, a message detailing which one it is using will be emitted.",
+      "default": false
+    },
+    "no-cache": {
+      "type": "boolean",
+      "description": "Skip reading and writing the cache, forcing Black to reformat all included files.",
+      "default": false
+    }
+  }
+}
diff --git a/py311/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..a4428b5a869d16a4fbd7253de5d3c1fa2315e05a
Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so differ
diff --git a/py311/lib/python3.11/site-packages/black/rusty.py b/py311/lib/python3.11/site-packages/black/rusty.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebd4c052d1f37b9eed01d329143c15bfbe0f0e74
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/black/rusty.py
@@ -0,0 +1,28 @@
+"""An error-handling model influenced by that used by the Rust programming language
+
+See https://doc.rust-lang.org/book/ch09-00-error-handling.html.
+""" + +from typing import Generic, TypeVar, Union + +T = TypeVar("T") +E = TypeVar("E", bound=Exception) + + +class Ok(Generic[T]): + def __init__(self, value: T) -> None: + self._value = value + + def ok(self) -> T: + return self._value + + +class Err(Generic[E]): + def __init__(self, e: E) -> None: + self._e = e + + def err(self) -> E: + return self._e + + +Result = Union[Ok[T], Err[E]] diff --git a/py311/lib/python3.11/site-packages/black/schema.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/schema.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fd9630b10702bc0e06720f122104723df8649a00 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/schema.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/schema.py b/py311/lib/python3.11/site-packages/black/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..f534dbb028d579cfaf34efda8c75bdf7e584c1e4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/schema.py @@ -0,0 +1,15 @@ +import importlib.resources +import json +from typing import Any + + +def get_schema(tool_name: str = "black") -> Any: + """Get the stored complete schema for black's settings.""" + assert tool_name == "black", "Only black is supported." + + pkg = "black.resources" + fname = "black.schema.json" + + schema = importlib.resources.files(pkg).joinpath(fname) + with schema.open(encoding="utf-8") as f: + return json.load(f) diff --git a/py311/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ac6bd9753dce56473d8806c0f4a4bef0a4bcdeab Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/strings.py b/py311/lib/python3.11/site-packages/black/strings.py new file mode 100644 index 0000000000000000000000000000000000000000..78c9f258fe2f770cd10fd1fdc24b0b104ab662ee --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/strings.py @@ -0,0 +1,389 @@ +""" +Simple formatting on strings. Further string formatting code is in trans.py. +""" + +import re +import sys +from functools import lru_cache +from re import Match, Pattern +from typing import Final + +from black._width_table import WIDTH_TABLE +from blib2to3.pytree import Leaf + +STRING_PREFIX_CHARS: Final = "fturbFTURB" # All possible string prefix characters. +STRING_PREFIX_RE: Final = re.compile( + r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL +) +UNICODE_ESCAPE_RE: Final = re.compile( + r"(?P\\+)(?P" + r"(u(?P[a-fA-F0-9]{4}))" # Character with 16-bit hex value xxxx + r"|(U(?P[a-fA-F0-9]{8}))" # Character with 32-bit hex value xxxxxxxx + r"|(x(?P[a-fA-F0-9]{2}))" # Character with hex value hh + r"|(N\{(?P[a-zA-Z0-9 \-]{2,})\})" # Character named name in the Unicode database + r")", + re.VERBOSE, +) + + +def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: + """Replace `regex` with `replacement` twice on `original`. + + This is used by string normalization to perform replaces on + overlapping matches. + """ + return regex.sub(replacement, regex.sub(replacement, original)) + + +def has_triple_quotes(string: str) -> bool: + """ + Returns: + True iff @string starts with three quotation characters. 
+ """ + raw_string = string.lstrip(STRING_PREFIX_CHARS) + return raw_string[:3] in {'"""', "'''"} + + +def lines_with_leading_tabs_expanded(s: str) -> list[str]: + """ + Splits string into lines and expands only leading tabs (following the normal + Python rules) + """ + lines = [] + for line in s.splitlines(): + stripped_line = line.lstrip() + if not stripped_line or stripped_line == line: + lines.append(line) + else: + prefix_length = len(line) - len(stripped_line) + prefix = line[:prefix_length].expandtabs() + lines.append(prefix + stripped_line) + if s.endswith("\n"): + lines.append("") + return lines + + +def fix_multiline_docstring(docstring: str, prefix: str) -> str: + # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation + assert docstring, "INTERNAL ERROR: Multiline docstrings cannot be empty" + lines = lines_with_leading_tabs_expanded(docstring) + # Determine minimum indentation (first line doesn't count): + indent = sys.maxsize + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < sys.maxsize: + last_line_idx = len(lines) - 2 + for i, line in enumerate(lines[1:]): + stripped_line = line[indent:].rstrip() + if stripped_line or i == last_line_idx: + trimmed.append(prefix + stripped_line) + else: + trimmed.append("") + return "\n".join(trimmed) + + +def get_string_prefix(string: str) -> str: + """ + Pre-conditions: + * assert_is_leaf_string(@string) + + Returns: + @string's prefix (e.g. '', 'r', 'f', or 'rf'). + """ + assert_is_leaf_string(string) + + prefix = [] + for char in string: + if char in STRING_PREFIX_CHARS: + prefix.append(char) + else: + break + return "".join(prefix) + + +def assert_is_leaf_string(string: str) -> None: + """ + Checks the pre-condition that @string has the format that you would expect + of `leaf.value` where `leaf` is some Leaf such that `leaf.type == + token.STRING`. A more precise description of the pre-conditions that are + checked are listed below. + + Pre-conditions: + * @string starts with either ', ", ', or " where + `set()` is some subset of `set(STRING_PREFIX_CHARS)`. + * @string ends with a quote character (' or "). + + Raises: + AssertionError(...) if the pre-conditions listed above are not + satisfied. + """ + dquote_idx = string.find('"') + squote_idx = string.find("'") + if -1 in [dquote_idx, squote_idx]: + quote_idx = max(dquote_idx, squote_idx) + else: + quote_idx = min(squote_idx, dquote_idx) + + assert ( + 0 <= quote_idx < len(string) - 1 + ), f"{string!r} is missing a starting quote character (' or \")." + assert string[-1] in ( + "'", + '"', + ), f"{string!r} is missing an ending quote character (' or \")." + assert set(string[:quote_idx]).issubset( + set(STRING_PREFIX_CHARS) + ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." 
+def normalize_string_prefix(s: str) -> str:
+    """Make all string prefixes lowercase."""
+    match = STRING_PREFIX_RE.match(s)
+    assert match is not None, f"failed to match string {s!r}"
+    orig_prefix = match.group(1)
+    new_prefix = (
+        orig_prefix.replace("F", "f")
+        .replace("B", "b")
+        .replace("U", "")
+        .replace("u", "")
+    )
+
+    # Python syntax guarantees max 2 prefixes and that one of them is "r"
+    if len(new_prefix) == 2 and new_prefix[0].lower() != "r":
+        new_prefix = new_prefix[::-1]
+    return f"{new_prefix}{match.group(2)}"
+
+
+# Re(gex) does actually cache patterns internally but this still improves
+# performance on a long list literal of strings by 5-9% since lru_cache's
+# caching overhead is much lower.
+@lru_cache(maxsize=64)
+def _cached_compile(pattern: str) -> Pattern[str]:
+    return re.compile(pattern)
+
+
+def normalize_string_quotes(s: str) -> str:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate.
+    """
+    value = s.lstrip(STRING_PREFIX_CHARS)
+    if value[:3] == '"""':
+        return s
+
+    elif value[:3] == "'''":
+        orig_quote = "'''"
+        new_quote = '"""'
+    elif value[0] == '"':
+        orig_quote = '"'
+        new_quote = "'"
+    else:
+        orig_quote = "'"
+        new_quote = '"'
+    first_quote_pos = s.find(orig_quote)
+    assert first_quote_pos != -1, f"INTERNAL ERROR: Malformed string {s!r}"
+
+    prefix = s[:first_quote_pos]
+    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
+    escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
+    escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
+    body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]
+    if "r" in prefix.casefold():
+        if unescaped_new_quote.search(body):
+            # There's at least one unescaped new_quote in this raw string
+            # so converting is impossible
+            return s
+
+        # Do not introduce or remove backslashes in raw strings
+        new_body = body
+    else:
+        # remove unnecessary escapes
+        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
+        if body != new_body:
+            # Consider the string without unnecessary escapes as the original
+            body = new_body
+            s = f"{prefix}{orig_quote}{body}{orig_quote}"
+        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
+        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
+
+    if "f" in prefix.casefold():
+        matches = re.findall(
+            r"""
+            (?:(?<!\{)|^)  # start of string or non-{ followed by single {
+            \{  # just one {
+            (?:[^{](?:.*)?)?  # match the string inside the braces
+            \}  # just one }
+            (?:(?!\})|$)  # end of string or non-} followed by single }
+            """,
+            new_body,
+            re.VERBOSE,
+        )
+        for m in matches:
+            if "\\" in str(m):
+                # Do not introduce backslashes in interpolated expressions. That will
+                # change the perceived behavior of the string value returned by the
+                # underlying function.
+                return s
+
+    if new_quote == '"""' and new_body[-1:] == '"':
+        # edge case:
+        new_body = new_body[:-1] + '\\"'
+
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return s  # Do not introduce more escaping
+
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return s  # Prefer double quotes
+
+    return f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_fstring_quotes(
+    quote: str,
+    middles: list[Leaf],
+    is_raw_fstring: bool,
+) -> tuple[list[Leaf], str]:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate.
+ """ + if quote == '"""': + return middles, quote + + elif quote == "'''": + new_quote = '"""' + elif quote == '"': + new_quote = "'" + else: + new_quote = '"' + + unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}") + escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") + escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){quote}") + if is_raw_fstring: + for middle in middles: + if unescaped_new_quote.search(middle.value): + # There's at least one unescaped new_quote in this raw string + # so converting is impossible + return middles, quote + + # Do not introduce or remove backslashes in raw strings, just use double quote + return middles, '"' + + new_segments = [] + for middle in middles: + segment = middle.value + # remove unnecessary escapes + new_segment = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", segment) + if segment != new_segment: + # Consider the string without unnecessary escapes as the original + middle.value = new_segment + + new_segment = sub_twice(escaped_orig_quote, rf"\1\2{quote}", new_segment) + new_segment = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_segment) + new_segments.append(new_segment) + + if new_quote == '"""' and new_segments[-1].endswith('"'): + # edge case: + new_segments[-1] = new_segments[-1][:-1] + '\\"' + + for middle, new_segment in zip(middles, new_segments, strict=True): + orig_escape_count = middle.value.count("\\") + new_escape_count = new_segment.count("\\") + + if new_escape_count > orig_escape_count: + return middles, quote # Do not introduce more escaping + + if new_escape_count == orig_escape_count and quote == '"': + return middles, quote # Prefer double quotes + + for middle, new_segment in zip(middles, new_segments, strict=True): + middle.value = new_segment + + return middles, new_quote + + +def normalize_unicode_escape_sequences(leaf: Leaf) -> None: + """Replace hex codes in Unicode escape sequences with lowercase representation.""" + text = leaf.value + prefix = get_string_prefix(text) + if "r" in prefix.lower(): + return + + def replace(m: Match[str]) -> str: + groups = m.groupdict() + back_slashes = groups["backslashes"] + + if len(back_slashes) % 2 == 0: + return back_slashes + groups["body"] + + if groups["u"]: + # \u + return back_slashes + "u" + groups["u"].lower() + elif groups["U"]: + # \U + return back_slashes + "U" + groups["U"].lower() + elif groups["x"]: + # \x + return back_slashes + "x" + groups["x"].lower() + else: + assert groups["N"], f"Unexpected match: {m}" + # \N{} + return back_slashes + "N{" + groups["N"].upper() + "}" + + leaf.value = re.sub(UNICODE_ESCAPE_RE, replace, text) + + +@lru_cache(maxsize=4096) +def char_width(char: str) -> int: + """Return the width of a single character as it would be displayed in a + terminal or editor (which respects Unicode East Asian Width). + + Full width characters are counted as 2, while half width characters are + counted as 1. Also control characters are counted as 0. 
+ """ + table = WIDTH_TABLE + codepoint = ord(char) + highest = len(table) - 1 + lowest = 0 + idx = highest // 2 + while True: + start_codepoint, end_codepoint, width = table[idx] + if codepoint < start_codepoint: + highest = idx - 1 + elif codepoint > end_codepoint: + lowest = idx + 1 + else: + return 0 if width < 0 else width + if highest < lowest: + break + idx = (highest + lowest) // 2 + return 1 + + +def str_width(line_str: str) -> int: + """Return the width of `line_str` as it would be displayed in a terminal + or editor (which respects Unicode East Asian Width). + + You could utilize this function to determine, for example, if a string + is too wide to display in a terminal or editor. + """ + if line_str.isascii(): + # Fast path for a line consisting of only ASCII characters + return len(line_str) + return sum(map(char_width, line_str)) + + +def count_chars_in_width(line_str: str, max_width: int) -> int: + """Count the number of characters in `line_str` that would fit in a + terminal or editor of `max_width` (which respects Unicode East Asian + Width). + """ + total_width = 0 + for i, char in enumerate(line_str): + width = char_width(char) + if width + total_width > max_width: + return i + total_width += width + return len(line_str) diff --git a/py311/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..87c6cdd1b8302ba0ba13915916f9cc4cda0a2d28 Binary files /dev/null and b/py311/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/black/trans.py b/py311/lib/python3.11/site-packages/black/trans.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb6f6270c8de357005228a66efc238025da2717 --- /dev/null +++ b/py311/lib/python3.11/site-packages/black/trans.py @@ -0,0 +1,2511 @@ +""" +String transformers that can split and merge strings. 
+""" + +import re +from abc import ABC, abstractmethod +from collections import defaultdict +from collections.abc import Callable, Collection, Iterable, Iterator, Sequence +from dataclasses import dataclass +from typing import Any, ClassVar, Final, Literal, TypeVar, Union + +from mypy_extensions import trait + +from black.comments import contains_pragma_comment +from black.lines import Line, append_leaves +from black.mode import Feature, Mode +from black.nodes import ( + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + is_empty_lpar, + is_empty_par, + is_empty_rpar, + is_part_of_annotation, + parent_type, + replace_child, + syms, +) +from black.rusty import Err, Ok, Result +from black.strings import ( + assert_is_leaf_string, + count_chars_in_width, + get_string_prefix, + has_triple_quotes, + normalize_string_quotes, + str_width, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + + +class CannotTransform(Exception): + """Base class for errors raised by Transformers.""" + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +Transformer = Callable[[Line, Collection[Feature], Mode], Iterator[Line]] +Index = int +NodeType = int +ParserState = int +StringID = int +TResult = Result[T, CannotTransform] # (T)ransform Result +TMatchResult = TResult[list[Index]] + +SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops + + +def TErr(err_msg: str) -> Err[CannotTransform]: + """(T)ransform Err + + Convenience function used when working with the TResult type. + """ + cant_transform = CannotTransform(err_msg) + return Err(cant_transform) + + +def hug_power_op( + line: Line, features: Collection[Feature], mode: Mode +) -> Iterator[Line]: + """A transformer which normalizes spacing around power operators.""" + + # Performance optimization to avoid unnecessary Leaf clones and other ops. + for leaf in line.leaves: + if leaf.type == token.DOUBLESTAR: + break + else: + raise CannotTransform("No doublestar token was found in the line.") + + def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool: + # Brackets and parentheses indicate calls, subscripts, etc. ... + # basically stuff that doesn't count as "simple". Only a NAME lookup + # or dotted lookup (eg. NAME.NAME) is OK. + if kind == -1: + return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB}) + else: + return handle_is_simple_lookup_forward( + line, index, {token.LPAR, token.LSQB} + ) + + def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool: + # An operand is considered "simple" if's a NAME, a numeric CONSTANT, a simple + # lookup (see above), with or without a preceding unary operator. + start = line.leaves[index] + if start.type in {token.NAME, token.NUMBER}: + return is_simple_lookup(index, kind) + + if start.type in {token.PLUS, token.MINUS, token.TILDE}: + if line.leaves[index + 1].type in {token.NAME, token.NUMBER}: + # kind is always one as bases with a preceding unary op will be checked + # for simplicity starting from the next token (so it'll hit the check + # above). 
+                return is_simple_lookup(index + 1, kind=1)
+
+        return False
+
+    new_line = line.clone()
+    should_hug = False
+    for idx, leaf in enumerate(line.leaves):
+        new_leaf = leaf.clone()
+        if should_hug:
+            new_leaf.prefix = ""
+            should_hug = False
+
+        should_hug = (
+            (0 < idx < len(line.leaves) - 1)
+            and leaf.type == token.DOUBLESTAR
+            and is_simple_operand(idx - 1, kind=-1)
+            and line.leaves[idx - 1].value != "lambda"
+            and is_simple_operand(idx + 1, kind=1)
+        )
+        if should_hug:
+            new_leaf.prefix = ""
+
+        # We have to be careful to make a new line properly:
+        # - bracket related metadata must be maintained (handled by Line.append)
+        # - comments need to be copied over, updating the leaf IDs they're attached to
+        new_line.append(new_leaf, preformatted=True)
+        for comment_leaf in line.comments_after(leaf):
+            new_line.append(comment_leaf, preformatted=True)
+
+    yield new_line
+
+
+def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
+    """
+    Handles the is_simple_lookup determination for the leaves prior to the doublestar
+    token. This is required because we need to isolate the chained expression in
+    order to determine whether a bracket or parenthesis belongs to that single
+    expression.
+    """
+    contains_disallowed = False
+    chain = []
+
+    while 0 <= index < len(line.leaves):
+        current = line.leaves[index]
+        chain.append(current)
+        if not contains_disallowed and current.type in disallowed:
+            contains_disallowed = True
+        if not is_expression_chained(chain):
+            return not contains_disallowed
+
+        index -= 1
+
+    return True
+
+
+def handle_is_simple_lookup_forward(
+    line: Line, index: int, disallowed: set[int]
+) -> bool:
+    """
+    Handles the is_simple_lookup decision for the leaves after the doublestar
+    token. This function is kept simple for consistency with the prior logic;
+    the forward case is more straightforward and does not need to care about
+    chained expressions.
+    """
+    while 0 <= index < len(line.leaves):
+        current = line.leaves[index]
+        if current.type in disallowed:
+            return False
+        if current.type not in {token.NAME, token.DOT} or (
+            current.type == token.NAME and current.value == "for"
+        ):
+            # If the current token isn't disallowed, we'll assume this is simple as
+            # only the disallowed tokens are semantically attached to this lookup
+            # expression we're checking. Also, stop early if we hit the 'for' bit
+            # of a comprehension.
+            return True
+
+        index += 1
+
+    return True
+
+
+def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
+    """
+    Determines whether the given leaves form a chained call (e.g. foo.lookup,
+    foo().lookup, and (foo.lookup()) are all recognized as chained calls).
+    """
+    if len(chained_leaves) < 2:
+        return True
+
+    current_leaf = chained_leaves[-1]
+    past_leaf = chained_leaves[-2]
+
+    if past_leaf.type == token.NAME:
+        return current_leaf.type in {token.DOT}
+    elif past_leaf.type in {token.RPAR, token.RSQB}:
+        return current_leaf.type in {token.RSQB, token.RPAR}
+    elif past_leaf.type in {token.LPAR, token.LSQB}:
+        return current_leaf.type in {token.NAME, token.LPAR, token.LSQB}
+    else:
+        return False
+
+
+class StringTransformer(ABC):
+    """
+    An implementation of the Transformer protocol that relies on its
+    subclasses overriding the template methods `do_match(...)` and
+    `do_transform(...)`.
+
+    This Transformer works exclusively on strings (for example, by merging
+    or splitting them).
+
+    The following sections can be found among the docstrings of each concrete
+    StringTransformer subclass.
+
+    Requirements:
+        Which requirements must be met by the given Line for this
+        StringTransformer to be applied?
+
+    Transformations:
+        If the given Line meets all of the above requirements, which string
+        transformations can you expect to be applied to it by this
+        StringTransformer?
+
+    Collaborations:
+        What contractual agreements does this StringTransformer have with other
+        StringTransformers? Such collaborations should be eliminated/minimized
+        as much as possible.
+    """
+
+    __name__: Final = "StringTransformer"
+
+    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
+    # `abc.ABC`.
+    def __init__(self, line_length: int, normalize_strings: bool) -> None:
+        self.line_length = line_length
+        self.normalize_strings = normalize_strings
+
+    @abstractmethod
+    def do_match(self, line: Line) -> TMatchResult:
+        """
+        Returns:
+            * Ok(string_indices) such that for each index, `line.leaves[index]`
+              is our target string if a match was able to be made. For
+              transformers that don't result in more lines (e.g. StringMerger,
+              StringParenStripper), multiple matches and transforms are done at
+              once to reduce the complexity.
+            OR
+            * Err(CannotTransform), if no match could be made.
+        """
+
+    @abstractmethod
+    def do_transform(
+        self, line: Line, string_indices: list[int]
+    ) -> Iterator[TResult[Line]]:
+        """
+        Yields:
+            * Ok(new_line) where new_line is the new transformed line.
+            OR
+            * Err(CannotTransform) if the transformation failed for some reason. The
+              `do_match(...)` template method should usually be used to reject
+              the form of the given Line, but in some cases it is difficult to
+              know whether or not a Line meets the StringTransformer's
+              requirements until the transformation is already midway.
+
+        Side Effects:
+            This method should NOT mutate @line directly, but it MAY mutate the
+            Line's underlying Node structure. (WARNING: If the underlying Node
+            structure IS altered, then this method should NOT be allowed to
+            yield a CannotTransform after that point.)
+        """
+
+    def __call__(
+        self, line: Line, _features: Collection[Feature], _mode: Mode
+    ) -> Iterator[Line]:
+        """
+        StringTransformer instances have a call signature that mirrors that of
+        the Transformer type.
+
+        Raises:
+            CannotTransform(...) if the concrete StringTransformer class is unable
+            to transform @line.
+        """
+        # Optimization to avoid calling `self.do_match(...)` when the line does
+        # not contain any string.
+        if not any(leaf.type == token.STRING for leaf in line.leaves):
+            raise CannotTransform("There are no strings in this line.")
+
+        match_result = self.do_match(line)
+
+        if isinstance(match_result, Err):
+            cant_transform = match_result.err()
+            raise CannotTransform(
+                f"The string transformer {self.__class__.__name__} does not recognize"
+                " this line as one that it can transform."
+            ) from cant_transform
+
+        string_indices = match_result.ok()
+
+        for line_result in self.do_transform(line, string_indices):
+            if isinstance(line_result, Err):
+                cant_transform = line_result.err()
+                raise CannotTransform(
+                    "StringTransformer failed while attempting to transform string."
+                ) from cant_transform
+            line = line_result.ok()
+            yield line
+
+
+@dataclass
+class CustomSplit:
+    """A custom (i.e. manual) string split.
+
+    A single CustomSplit instance represents a single substring.
+
+    Examples:
+        Consider the following string:
+        ```
+        "Hi there friend."
+        " This is a custom"
+        f" string {split}."
+ ``` + + This string will correspond to the following three CustomSplit instances: + ``` + CustomSplit(False, 16) + CustomSplit(False, 17) + CustomSplit(True, 16) + ``` + """ + + has_prefix: bool + break_idx: int + + +CustomSplitMapKey = tuple[StringID, str] + + +@trait +class CustomSplitMapMixin: + """ + This mixin class is used to map merged strings to a sequence of + CustomSplits, which will then be used to re-split the strings iff none of + the resultant substrings go over the configured max line length. + """ + + _CUSTOM_SPLIT_MAP: ClassVar[dict[CustomSplitMapKey, tuple[CustomSplit, ...]]] = ( + defaultdict(tuple) + ) + + @staticmethod + def _get_key(string: str) -> CustomSplitMapKey: + """ + Returns: + A unique identifier that is used internally to map @string to a + group of custom splits. + """ + return (id(string), string) + + def add_custom_splits( + self, string: str, custom_splits: Iterable[CustomSplit] + ) -> None: + """Custom Split Map Setter Method + + Side Effects: + Adds a mapping from @string to the custom splits @custom_splits. + """ + key = self._get_key(string) + self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) + + def pop_custom_splits(self, string: str) -> list[CustomSplit]: + """Custom Split Map Getter Method + + Returns: + * A list of the custom splits that are mapped to @string, if any + exist. + OR + * [], otherwise. + + Side Effects: + Deletes the mapping between @string and its associated custom + splits (which are returned to the caller). + """ + key = self._get_key(string) + + custom_splits = self._CUSTOM_SPLIT_MAP[key] + del self._CUSTOM_SPLIT_MAP[key] + + return list(custom_splits) + + def has_custom_splits(self, string: str) -> bool: + """ + Returns: + True iff @string is associated with a set of custom splits. + """ + key = self._get_key(string) + return key in self._CUSTOM_SPLIT_MAP + + +class StringMerger(StringTransformer, CustomSplitMapMixin): + """StringTransformer that merges strings together. + + Requirements: + (A) The line contains adjacent strings such that ALL of the validation checks + listed in StringMerger._validate_msg(...)'s docstring pass. + OR + (B) The line contains a string which uses line continuation backslashes. + + Transformations: + Depending on which of the two requirements above where met, either: + + (A) The string group associated with the target string is merged. + OR + (B) All line-continuation backslashes are removed from the target string. + + Collaborations: + StringMerger provides custom split information to StringSplitter. + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + string_indices = [] + idx = 0 + while is_valid_index(idx): + leaf = LL[idx] + if ( + leaf.type == token.STRING + and is_valid_index(idx + 1) + and LL[idx + 1].type == token.STRING + ): + # Let's check if the string group contains an inline comment + # If we have a comment inline, we don't merge the strings + contains_comment = False + i = idx + while is_valid_index(i): + if LL[i].type != token.STRING: + break + if line.comments_after(LL[i]): + contains_comment = True + break + i += 1 + + if not contains_comment and not is_part_of_annotation(leaf): + string_indices.append(idx) + + # Advance to the next non-STRING leaf. + idx += 2 + while is_valid_index(idx) and LL[idx].type == token.STRING: + idx += 1 + + elif leaf.type == token.STRING and "\\\n" in leaf.value: + string_indices.append(idx) + # Advance to the next non-STRING leaf. 
+ idx += 1 + while is_valid_index(idx) and LL[idx].type == token.STRING: + idx += 1 + + else: + idx += 1 + + if string_indices: + return Ok(string_indices) + else: + return TErr("This line has no strings that need merging.") + + def do_transform( + self, line: Line, string_indices: list[int] + ) -> Iterator[TResult[Line]]: + new_line = line + + rblc_result = self._remove_backslash_line_continuation_chars( + new_line, string_indices + ) + if isinstance(rblc_result, Ok): + new_line = rblc_result.ok() + + msg_result = self._merge_string_group(new_line, string_indices) + if isinstance(msg_result, Ok): + new_line = msg_result.ok() + + if isinstance(rblc_result, Err) and isinstance(msg_result, Err): + msg_cant_transform = msg_result.err() + rblc_cant_transform = rblc_result.err() + cant_transform = CannotTransform( + "StringMerger failed to merge any strings in this line." + ) + + # Chain the errors together using `__cause__`. + msg_cant_transform.__cause__ = rblc_cant_transform + cant_transform.__cause__ = msg_cant_transform + + yield Err(cant_transform) + else: + yield Ok(new_line) + + @staticmethod + def _remove_backslash_line_continuation_chars( + line: Line, string_indices: list[int] + ) -> TResult[Line]: + """ + Merge strings that were split across multiple lines using + line-continuation backslashes. + + Returns: + Ok(new_line), if @line contains backslash line-continuation + characters. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + indices_to_transform = [] + for string_idx in string_indices: + string_leaf = LL[string_idx] + if ( + string_leaf.type == token.STRING + and "\\\n" in string_leaf.value + and not has_triple_quotes(string_leaf.value) + ): + indices_to_transform.append(string_idx) + + if not indices_to_transform: + return TErr( + "Found no string leaves that contain backslash line continuation" + " characters." + ) + + new_line = line.clone() + new_line.comments = line.comments.copy() + append_leaves(new_line, line, LL) + + for string_idx in indices_to_transform: + new_string_leaf = new_line.leaves[string_idx] + new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") + + return Ok(new_line) + + def _merge_string_group( + self, line: Line, string_indices: list[int] + ) -> TResult[Line]: + """ + Merges string groups (i.e. set of adjacent strings). + + Each index from `string_indices` designates one string group's first + leaf in `line.leaves`. + + Returns: + Ok(new_line), if ALL of the validation checks found in + _validate_msg(...) pass. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # A dict of {string_idx: tuple[num_of_strings, string_leaf]}. + merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {} + for string_idx in string_indices: + vresult = self._validate_msg(line, string_idx) + if isinstance(vresult, Err): + continue + merged_string_idx_dict[string_idx] = self._merge_one_string_group( + LL, string_idx, is_valid_index + ) + + if not merged_string_idx_dict: + return TErr("No string group is merged") + + # Build the final line ('new_line') that this method will later return. 
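+        # (Note, added for clarity: leaves that fall inside a merged group are
+        # not copied over individually below; only the merged string leaf is
+        # appended, and comments trailing the group's leaves are re-attached
+        # to the new line.)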
+        new_line = line.clone()
+        previous_merged_string_idx = -1
+        previous_merged_num_of_strings = -1
+        for i, leaf in enumerate(LL):
+            if i in merged_string_idx_dict:
+                previous_merged_string_idx = i
+                previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
+                new_line.append(string_leaf)
+
+            if (
+                previous_merged_string_idx
+                <= i
+                < previous_merged_string_idx + previous_merged_num_of_strings
+            ):
+                for comment_leaf in line.comments_after(leaf):
+                    new_line.append(comment_leaf, preformatted=True)
+                continue
+
+            append_leaves(new_line, line, [leaf])
+
+        return Ok(new_line)
+
+    def _merge_one_string_group(
+        self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
+    ) -> tuple[int, Leaf]:
+        """
+        Merges one string group where the first string in the group is
+        `LL[string_idx]`.
+
+        Returns:
+            A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
+            number of strings merged and `leaf` is the newly merged string
+            to be replaced in the new line.
+        """
+        # If the string group is wrapped inside an Atom node, we must make sure
+        # to later replace that Atom with our new (merged) string leaf.
+        atom_node = LL[string_idx].parent
+
+        # We will place BREAK_MARK in between every two substrings that we
+        # merge. We will then later go through our final result and use the
+        # various instances of BREAK_MARK we find to add the right values to
+        # the custom split map.
+        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
+
+        QUOTE = LL[string_idx].value[-1]
+
+        def make_naked(string: str, string_prefix: str) -> str:
+            """Strip @string (i.e. make it a "naked" string)
+
+            Pre-conditions:
+                * assert_is_leaf_string(@string)
+
+            Returns:
+                A string that is identical to @string except that
+                @string_prefix has been stripped, the surrounding QUOTE
+                characters have been removed, and any remaining QUOTE
+                characters have been escaped.
+            """
+            assert_is_leaf_string(string)
+            if "f" in string_prefix:
+                f_expressions = [
+                    string[span[0] + 1 : span[1] - 1]  # +-1 to get rid of curly braces
+                    for span in iter_fexpr_spans(string)
+                ]
+                debug_expressions_contain_visible_quotes = any(
+                    re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
+                    for expression in f_expressions
+                )
+                if not debug_expressions_contain_visible_quotes:
+                    # We don't want to toggle visible quotes in debug f-strings, as
+                    # that may change the AST
+                    string = _toggle_fexpr_quotes(string, QUOTE)
+                    # After quotes toggling, quotes in expressions won't be escaped
+                    # because quotes can't be reused in f-strings. So we can simply
+                    # let the escaping logic below run without knowing f-string
+                    # expressions.
+
+            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
+            naked_string = string[len(string_prefix) + 1 : -1]
+            naked_string = re.sub(
+                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
+            )
+            return naked_string
+
+        # Holds the CustomSplit objects that will later be added to the custom
+        # split map.
+        custom_splits = []
+
+        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
+        prefix_tracker = []
+
+        # Sets the 'prefix' variable. This is the prefix that the final merged
+        # string will have.
+        next_str_idx = string_idx
+        prefix = ""
+        while (
+            not prefix
+            and is_valid_index(next_str_idx)
+            and LL[next_str_idx].type == token.STRING
+        ):
+            prefix = get_string_prefix(LL[next_str_idx].value).lower()
+            next_str_idx += 1
+
+        # The next loop merges the string group. The final string will be
+        # contained in 'S'.
+        #
+        # The following convenience variables are used:
+        #
+        #   S: string
+        #   NS: naked string
+        #   SS: next string
+        #   NSS: naked next string
+        S = ""
+        NS = ""
+        num_of_strings = 0
+        next_str_idx = string_idx
+        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
+            num_of_strings += 1
+
+            SS = LL[next_str_idx].value
+            next_prefix = get_string_prefix(SS).lower()
+
+            # If this is an f-string group but this substring is not prefixed
+            # with 'f'...
+            if "f" in prefix and "f" not in next_prefix:
+                # Then we must escape any braces contained in this substring.
+                SS = re.sub(r"(\{|\})", r"\1\1", SS)
+
+            NSS = make_naked(SS, next_prefix)
+
+            has_prefix = bool(next_prefix)
+            prefix_tracker.append(has_prefix)
+
+            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
+            NS = make_naked(S, prefix)
+
+            next_str_idx += 1
+
+        # Take a note on the index of the non-STRING leaf.
+        non_string_idx = next_str_idx
+
+        S_leaf = Leaf(token.STRING, S)
+        if self.normalize_strings:
+            S_leaf.value = normalize_string_quotes(S_leaf.value)
+
+        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
+        temp_string = S_leaf.value[len(prefix) + 1 : -1]
+        for has_prefix in prefix_tracker:
+            mark_idx = temp_string.find(BREAK_MARK)
+            assert (
+                mark_idx >= 0
+            ), "Logic error while filling the custom string breakpoint cache."
+
+            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
+            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
+            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
+
+        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
+
+        if atom_node is not None:
+            # If not all children of the atom node are merged (this can happen
+            # when there is a standalone comment in the middle) ...
+            if non_string_idx - string_idx < len(atom_node.children):
+                # We need to replace the old STRING leaves with the new string leaf.
+                first_child_idx = LL[string_idx].remove()
+                for idx in range(string_idx + 1, non_string_idx):
+                    LL[idx].remove()
+                if first_child_idx is not None:
+                    atom_node.insert_child(first_child_idx, string_leaf)
+            else:
+                # Else replace the atom node with the new string leaf.
+                replace_child(atom_node, string_leaf)
+
+        self.add_custom_splits(string_leaf.value, custom_splits)
+        return num_of_strings, string_leaf
+
+    @staticmethod
+    def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
+        """Validate (M)erge (S)tring (G)roup
+
+        Transform-time string validation logic for _merge_string_group(...).
+
+        Returns:
+            * Ok(None), if ALL validation checks (listed below) pass.
+ OR + * Err(CannotTransform), if any of the following are true: + - The target string group does not contain ANY stand-alone comments. + - The target string is not in a string group (i.e. it has no + adjacent strings). + - The string group has more than one inline comment. + - The string group has an inline comment that appears to be a pragma. + - The set of all string prefixes in the string group is of + length greater than one and is not equal to {"", "f"}. + - The string group consists of raw strings. + - The string group would merge f-strings with different quote types + and internal quotes. + - The string group is stringified type annotations. We don't want to + process stringified type annotations since pyright doesn't support + them spanning multiple string values. (NOTE: mypy, pytype, pyre do + support them, so we can change if pyright also gains support in the + future. See https://github.com/microsoft/pyright/issues/4359.) + """ + # We first check for "inner" stand-alone comments (i.e. stand-alone + # comments that have a string leaf before them AND after them). + for inc in [1, -1]: + i = string_idx + found_sa_comment = False + is_valid_index = is_valid_index_factory(line.leaves) + while is_valid_index(i) and line.leaves[i].type in [ + token.STRING, + STANDALONE_COMMENT, + ]: + if line.leaves[i].type == STANDALONE_COMMENT: + found_sa_comment = True + elif found_sa_comment: + return TErr( + "StringMerger does NOT merge string groups which contain " + "stand-alone comments." + ) + + i += inc + + QUOTE = line.leaves[string_idx].value[-1] + + num_of_inline_string_comments = 0 + set_of_prefixes = set() + num_of_strings = 0 + for leaf in line.leaves[string_idx:]: + if leaf.type != token.STRING: + # If the string group is trailed by a comma, we count the + # comments trailing the comma to be one of the string group's + # comments. + if leaf.type == token.COMMA and id(leaf) in line.comments: + num_of_inline_string_comments += 1 + break + + if has_triple_quotes(leaf.value): + return TErr("StringMerger does NOT merge multiline strings.") + + num_of_strings += 1 + prefix = get_string_prefix(leaf.value).lower() + if "r" in prefix: + return TErr("StringMerger does NOT merge raw strings.") + + set_of_prefixes.add(prefix) + + if ( + "f" in prefix + and leaf.value[-1] != QUOTE + and ( + "'" in leaf.value[len(prefix) + 1 : -1] + or '"' in leaf.value[len(prefix) + 1 : -1] + ) + ): + return TErr( + "StringMerger does NOT merge f-strings with different quote types" + " and internal quotes." + ) + + if id(leaf) in line.comments: + num_of_inline_string_comments += 1 + if contains_pragma_comment(line.comments[id(leaf)]): + return TErr("Cannot merge strings which have pragma comments.") + + if num_of_strings < 2: + return TErr( + f"Not enough strings to merge (num_of_strings={num_of_strings})." + ) + + if num_of_inline_string_comments > 1: + return TErr( + f"Too many inline string comments ({num_of_inline_string_comments})." + ) + + if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: + return TErr(f"Too many different prefixes ({set_of_prefixes}).") + + return Ok(None) + + +class StringParenStripper(StringTransformer): + """StringTransformer that strips surrounding parentheses from strings. + + Requirements: + The line contains a string which is surrounded by parentheses and: + - The target string is NOT the only argument to a function call. + - The target string is NOT a "pointless" string. + - The target string is NOT a dictionary value. 
+ - If the target string contains a PERCENT, the brackets are not + preceded or followed by an operator with higher precedence than + PERCENT. + + Transformations: + The parentheses mentioned in the 'Requirements' section are stripped. + + Collaborations: + StringParenStripper has its own inherent usefulness, but it is also + relied on to clean up the parentheses created by StringParenWrapper (in + the event that they are no longer needed). + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + string_indices = [] + + idx = -1 + while True: + idx += 1 + if idx >= len(LL): + break + leaf = LL[idx] + + # Should be a string... + if leaf.type != token.STRING: + continue + + # If this is a "pointless" string... + if ( + leaf.parent + and leaf.parent.parent + and leaf.parent.parent.type == syms.simple_stmt + ): + continue + + # Should be preceded by a non-empty LPAR... + if ( + not is_valid_index(idx - 1) + or LL[idx - 1].type != token.LPAR + or is_empty_lpar(LL[idx - 1]) + ): + continue + + # That LPAR should NOT be preceded by a colon (which could be a + # dictionary value), function name, or a closing bracket (which + # could be a function returning a function or a list/dictionary + # containing a function)... + if is_valid_index(idx - 2) and ( + LL[idx - 2].type == token.COLON + or LL[idx - 2].type == token.NAME + or LL[idx - 2].type in CLOSING_BRACKETS + ): + continue + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + next_idx = string_parser.parse(LL, string_idx) + + # if the leaves in the parsed string include a PERCENT, we need to + # make sure the initial LPAR is NOT preceded by an operator with + # higher or equal precedence to PERCENT + if is_valid_index(idx - 2): + # mypy can't quite follow unless we name this + before_lpar = LL[idx - 2] + if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( + ( + before_lpar.type + in { + token.STAR, + token.AT, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.TILDE, + token.DOUBLESTAR, + token.AWAIT, + token.LSQB, + token.LPAR, + } + ) + or ( + # only unary PLUS/MINUS + before_lpar.parent + and before_lpar.parent.type == syms.factor + and (before_lpar.type in {token.PLUS, token.MINUS}) + ) + ): + continue + + # Should be followed by a non-empty RPAR... + if ( + is_valid_index(next_idx) + and LL[next_idx].type == token.RPAR + and not is_empty_rpar(LL[next_idx]) + ): + # That RPAR should NOT be followed by anything with higher + # precedence than PERCENT + if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { + token.DOUBLESTAR, + token.LSQB, + token.LPAR, + token.DOT, + }: + continue + + string_indices.append(string_idx) + idx = string_idx + while idx < len(LL) - 1 and LL[idx + 1].type == token.STRING: + idx += 1 + + if string_indices: + return Ok(string_indices) + return TErr("This line has no strings wrapped in parens.") + + def do_transform( + self, line: Line, string_indices: list[int] + ) -> Iterator[TResult[Line]]: + LL = line.leaves + + string_and_rpar_indices: list[int] = [] + for string_idx in string_indices: + string_parser = StringParser() + rpar_idx = string_parser.parse(LL, string_idx) + + should_transform = True + for leaf in (LL[string_idx - 1], LL[rpar_idx]): + if line.comments_after(leaf): + # Should not strip parentheses which have comments attached + # to them. 
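+                    # (Illustrative, not upstream) e.g. given
+                    #     x = ("foo")  # keep this comment
+                    # stripping the parens would remove the leaf the
+                    # comment is attached to.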
+ should_transform = False + break + if should_transform: + string_and_rpar_indices.extend((string_idx, rpar_idx)) + + if string_and_rpar_indices: + yield Ok(self._transform_to_new_line(line, string_and_rpar_indices)) + else: + yield Err( + CannotTransform("All string groups have comments attached to them.") + ) + + def _transform_to_new_line( + self, line: Line, string_and_rpar_indices: list[int] + ) -> Line: + LL = line.leaves + + new_line = line.clone() + new_line.comments = line.comments.copy() + + previous_idx = -1 + # We need to sort the indices, since string_idx and its matching + # rpar_idx may not come in order, e.g. in + # `("outer" % ("inner".join(items)))`, the "inner" string's + # string_idx is smaller than "outer" string's rpar_idx. + for idx in sorted(string_and_rpar_indices): + leaf = LL[idx] + lpar_or_rpar_idx = idx - 1 if leaf.type == token.STRING else idx + append_leaves(new_line, line, LL[previous_idx + 1 : lpar_or_rpar_idx]) + if leaf.type == token.STRING: + string_leaf = Leaf(token.STRING, LL[idx].value) + LL[lpar_or_rpar_idx].remove() # Remove lpar. + replace_child(LL[idx], string_leaf) + new_line.append(string_leaf) + # replace comments + old_comments = new_line.comments.pop(id(LL[idx]), []) + new_line.comments.setdefault(id(string_leaf), []).extend(old_comments) + else: + LL[lpar_or_rpar_idx].remove() # This is a rpar. + + previous_idx = idx + + # Append the leaves after the last idx: + append_leaves(new_line, line, LL[idx + 1 :]) + + return new_line + + +class BaseStringSplitter(StringTransformer): + """ + Abstract class for StringTransformers which transform a Line's strings by splitting + them or placing them on their own lines where necessary to avoid going over + the configured line length. + + Requirements: + * The target string value is responsible for the line going over the + line length limit. It follows that after all of black's other line + split methods have been exhausted, this line (or one of the resulting + lines after all line splits are performed) would still be over the + line_length limit unless we split this string. + AND + + * The target string is NOT a "pointless" string (i.e. a string that has + no parent or siblings). + AND + + * The target string is not followed by an inline comment that appears + to be a pragma. + AND + + * The target string is not a multiline (i.e. triple-quote) string. + """ + + STRING_OPERATORS: Final = [ + token.EQEQUAL, + token.GREATER, + token.GREATEREQUAL, + token.LESS, + token.LESSEQUAL, + token.NOTEQUAL, + token.PERCENT, + token.PLUS, + token.STAR, + ] + + @abstractmethod + def do_splitter_match(self, line: Line) -> TMatchResult: + """ + BaseStringSplitter asks its clients to override this method instead of + `StringTransformer.do_match(...)`. + + Follows the same protocol as `StringTransformer.do_match(...)`. + + Refer to `help(StringTransformer.do_match)` for more information. 
+ """ + + def do_match(self, line: Line) -> TMatchResult: + match_result = self.do_splitter_match(line) + if isinstance(match_result, Err): + return match_result + + string_indices = match_result.ok() + assert len(string_indices) == 1, ( + f"{self.__class__.__name__} should only find one match at a time, found" + f" {len(string_indices)}" + ) + string_idx = string_indices[0] + vresult = self._validate(line, string_idx) + if isinstance(vresult, Err): + return vresult + + return match_result + + def _validate(self, line: Line, string_idx: int) -> TResult[None]: + """ + Checks that @line meets all of the requirements listed in this classes' + docstring. Refer to `help(BaseStringSplitter)` for a detailed + description of those requirements. + + Returns: + * Ok(None), if ALL of the requirements are met. + OR + * Err(CannotTransform), if ANY of the requirements are NOT met. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + + max_string_length = self._get_max_string_length(line, string_idx) + if len(string_leaf.value) <= max_string_length: + return TErr( + "The string itself is not what is causing this line to be too long." + ) + + if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ + token.STRING, + token.NEWLINE, + ]: + return TErr( + f"This string ({string_leaf.value}) appears to be pointless (i.e. has" + " no parent)." + ) + + if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( + line.comments[id(line.leaves[string_idx])] + ): + return TErr( + "Line appears to end with an inline pragma comment. Splitting the line" + " could modify the pragma's behavior." + ) + + if has_triple_quotes(string_leaf.value): + return TErr("We cannot split multiline strings.") + + return Ok(None) + + def _get_max_string_length(self, line: Line, string_idx: int) -> int: + """ + Calculates the max string length used when attempting to determine + whether or not the target string is responsible for causing the line to + go over the line length limit. + + WARNING: This method is tightly coupled to both StringSplitter and + (especially) StringParenWrapper. There is probably a better way to + accomplish what is being done here. + + Returns: + max_string_length: such that `line.leaves[string_idx].value > + max_string_length` implies that the target string IS responsible + for causing this line to exceed the line length limit. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # We use the shorthand "WMA4" in comments to abbreviate "We must + # account for". When giving examples, we use STRING to mean some/any + # valid string. + # + # Finally, we use the following convenience variables: + # + # P: The leaf that is before the target string leaf. + # N: The leaf that is after the target string leaf. + # NN: The leaf that is after N. + + # WMA4 the whitespace at the beginning of the line. + offset = line.depth * 4 + + if is_valid_index(string_idx - 1): + p_idx = string_idx - 1 + if ( + LL[string_idx - 1].type == token.LPAR + and LL[string_idx - 1].value == "" + and string_idx >= 2 + ): + # If the previous leaf is an empty LPAR placeholder, we should skip it. + p_idx -= 1 + + P = LL[p_idx] + if P.type in self.STRING_OPERATORS: + # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`). + offset += len(str(P)) + 1 + + if P.type == token.COMMA: + # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. 
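+                # (Added breakdown) one column each for the space, the comma,
+                # and the closing bracket.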
+ offset += 3 + + if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]: + # This conditional branch is meant to handle dictionary keys, + # variable assignments, 'return STRING' statement lines, and + # 'else STRING' ternary expression lines. + + # WMA4 a single space. + offset += 1 + + # WMA4 the lengths of any leaves that came before that space, + # but after any closing bracket before that space. + for leaf in reversed(LL[: p_idx + 1]): + offset += len(str(leaf)) + if leaf.type in CLOSING_BRACKETS: + break + + if is_valid_index(string_idx + 1): + N = LL[string_idx + 1] + if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2: + # If the next leaf is an empty RPAR placeholder, we should skip it. + N = LL[string_idx + 2] + + if N.type == token.COMMA: + # WMA4 a single comma at the end of the string (e.g `STRING,`). + offset += 1 + + if is_valid_index(string_idx + 2): + NN = LL[string_idx + 2] + + if N.type == token.DOT and NN.type == token.NAME: + # This conditional branch is meant to handle method calls invoked + # off of a string literal up to and including the LPAR character. + + # WMA4 the '.' character. + offset += 1 + + if ( + is_valid_index(string_idx + 3) + and LL[string_idx + 3].type == token.LPAR + ): + # WMA4 the left parenthesis character. + offset += 1 + + # WMA4 the length of the method's name. + offset += len(NN.value) + + has_comments = False + for comment_leaf in line.comments_after(LL[string_idx]): + if not has_comments: + has_comments = True + # WMA4 two spaces before the '#' character. + offset += 2 + + # WMA4 the length of the inline comment. + offset += len(comment_leaf.value) + + max_string_length = count_chars_in_width(str(line), self.line_length - offset) + return max_string_length + + @staticmethod + def _prefer_paren_wrap_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the "prefer paren wrap" statement + requirements listed in the 'Requirements' section of the StringParenWrapper + class's docstring. + OR + None, otherwise. + """ + # The line must start with a string. + if LL[0].type != token.STRING: + return None + + matching_nodes = [ + syms.listmaker, + syms.dictsetmaker, + syms.testlist_gexp, + ] + # If the string is an immediate child of a list/set/tuple literal... + if ( + parent_type(LL[0]) in matching_nodes + or parent_type(LL[0].parent) in matching_nodes + ): + # And the string is surrounded by commas (or is the first/last child)... + prev_sibling = LL[0].prev_sibling + next_sibling = LL[0].next_sibling + if ( + not prev_sibling + and not next_sibling + and parent_type(LL[0]) == syms.atom + ): + # If it's an atom string, we need to check the parent atom's siblings. + parent = LL[0].parent + assert parent is not None # For type checkers. + prev_sibling = parent.prev_sibling + next_sibling = parent.next_sibling + if (not prev_sibling or prev_sibling.type == token.COMMA) and ( + not next_sibling or next_sibling.type == token.COMMA + ): + return 0 + + return None + + +def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]: + """ + Yields spans corresponding to expressions in a given f-string. + Spans are half-open ranges (left inclusive, right exclusive). + Assumes the input string is a valid f-string, but will not crash if the input + string is invalid. 
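+
+    Examples:
+        >>> list(iter_fexpr_spans('f"a{b}c"'))
+        [(3, 6)]
+        >>> list(iter_fexpr_spans('f"{{not an expression}}"'))
+        []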
+ """ + stack: list[int] = [] # our curly paren stack + i = 0 + while i < len(s): + if s[i] == "{": + # if we're in a string part of the f-string, ignore escaped curly braces + if not stack and i + 1 < len(s) and s[i + 1] == "{": + i += 2 + continue + stack.append(i) + i += 1 + continue + + if s[i] == "}": + if not stack: + i += 1 + continue + j = stack.pop() + # we've made it back out of the expression! yield the span + if not stack: + yield (j, i + 1) + i += 1 + continue + + # if we're in an expression part of the f-string, fast-forward through strings + # note that backslashes are not legal in the expression portion of f-strings + if stack: + delim = None + if s[i : i + 3] in ("'''", '"""'): + delim = s[i : i + 3] + elif s[i] in ("'", '"'): + delim = s[i] + if delim: + i += len(delim) + while i < len(s) and s[i : i + len(delim)] != delim: + i += 1 + i += len(delim) + continue + i += 1 + + +def fstring_contains_expr(s: str) -> bool: + return any(iter_fexpr_spans(s)) + + +def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str: + """ + Toggles quotes used in f-string expressions that are `old_quote`. + + f-string expressions can't contain backslashes, so we need to toggle the + quotes if the f-string itself will end up using the same quote. We can + simply toggle without escaping because, quotes can't be reused in f-string + expressions. They will fail to parse. + + NOTE: If PEP 701 is accepted, above statement will no longer be true. + Though if quotes can be reused, we can simply reuse them without updates or + escaping, once Black figures out how to parse the new grammar. + """ + new_quote = "'" if old_quote == '"' else '"' + parts = [] + previous_index = 0 + for start, end in iter_fexpr_spans(fstring): + parts.append(fstring[previous_index:start]) + parts.append(fstring[start:end].replace(old_quote, new_quote)) + previous_index = end + parts.append(fstring[previous_index:]) + return "".join(parts) + + +class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that splits "atom" strings (i.e. strings which exist on + lines by themselves). + + Requirements: + * The line consists ONLY of a single string (possibly prefixed by a + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND + * All of the requirements listed in BaseStringSplitter's docstring. + + Transformations: + The string mentioned in the 'Requirements' section is split into as + many substrings as necessary to adhere to the configured line length. + + In the final set of substrings, no substring should be smaller than + MIN_SUBSTR_SIZE characters. + + The string will ONLY be split on spaces (i.e. each new substring should + start with a space). Note that the string will NOT be split on a space + which is escaped with a backslash. + + If the string is an f-string, it will NOT be split in the middle of an + f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x + else bar()} is an f-expression). + + If the string that is being split has an associated set of custom split + records and those custom splits will NOT result in any line going over + the configured line length, those custom splits are used. Otherwise the + string is split as late as possible (from left-to-right) while still + adhering to the transformation rules listed above. + + Collaborations: + StringSplitter relies on StringMerger to construct the appropriate + CustomSplit objects and add them to the custom split map. 
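+
+    Example (illustrative; the actual split point depends on the configured
+    line length):
+        x = (
+            "This is a very long string that does not fit on one line"
+        )
+    may be rewritten as:
+        x = (
+            "This is a very long string that"
+            " does not fit on one line"
+        )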
+ """ + + MIN_SUBSTR_SIZE: Final = 6 + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + if self._prefer_paren_wrap_match(LL) is not None: + return TErr("Line needs to be wrapped in parens first.") + + is_valid_index = is_valid_index_factory(LL) + + idx = 0 + + # The first two leaves MAY be the 'not in' keywords... + if ( + is_valid_index(idx) + and is_valid_index(idx + 1) + and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME] + and str(LL[idx]) + str(LL[idx + 1]) == "not in" + ): + idx += 2 + # Else the first leaf MAY be a string operator symbol or the 'in' keyword... + elif is_valid_index(idx) and ( + LL[idx].type in self.STRING_OPERATORS + or LL[idx].type == token.NAME + and str(LL[idx]) == "in" + ): + idx += 1 + + # The next/first leaf MAY be an empty LPAR... + if is_valid_index(idx) and is_empty_lpar(LL[idx]): + idx += 1 + + # The next/first leaf MUST be a string... + if not is_valid_index(idx) or LL[idx].type != token.STRING: + return TErr("Line does not start with a string.") + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by an empty RPAR... + if is_valid_index(idx) and is_empty_rpar(LL[idx]): + idx += 1 + + # That string / empty RPAR leaf MAY be followed by a comma... + if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... + if is_valid_index(idx): + return TErr("This line does not end with a string.") + + return Ok([string_idx]) + + def do_transform( + self, line: Line, string_indices: list[int] + ) -> Iterator[TResult[Line]]: + LL = line.leaves + assert len(string_indices) == 1, ( + f"{self.__class__.__name__} should only find one match at a time, found" + f" {len(string_indices)}" + ) + string_idx = string_indices[0] + + QUOTE = LL[string_idx].value[-1] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + prefix = get_string_prefix(LL[string_idx].value).lower() + + # We MAY choose to drop the 'f' prefix from substrings that don't + # contain any f-expressions, but ONLY if the original f-string + # contains at least one f-expression. Otherwise, we will alter the AST + # of the program. + drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr( + LL[string_idx].value + ) + + first_string_line = True + + string_op_leaves = self._get_string_operator_leaves(LL) + string_op_leaves_length = ( + sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1 + if string_op_leaves + else 0 + ) + + def maybe_append_string_operators(new_line: Line) -> None: + """ + Side Effects: + If @line starts with a string operator and this is the first + line we are constructing, this function appends the string + operator to @new_line and replaces the old string operator leaf + in the node structure. Otherwise this function does nothing. + """ + maybe_prefix_leaves = string_op_leaves if first_string_line else [] + for i, prefix_leaf in enumerate(maybe_prefix_leaves): + replace_child(LL[i], prefix_leaf) + new_line.append(prefix_leaf) + + ends_with_comma = ( + is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA + ) + + def max_last_string_column() -> int: + """ + Returns: + The max allowed width of the string value used for the last + line we will construct. 
Note that this value means the width + rather than the number of characters (e.g., many East Asian + characters expand to two columns). + """ + result = self.line_length + result -= line.depth * 4 + result -= 1 if ends_with_comma else 0 + result -= string_op_leaves_length + return result + + # --- Calculate Max Break Width (for string value) + # We start with the line length limit + max_break_width = self.line_length + # The last index of a string of length N is N-1. + max_break_width -= 1 + # Leading whitespace is not present in the string value (e.g. Leaf.value). + max_break_width -= line.depth * 4 + if max_break_width < 0: + yield TErr( + f"Unable to split {LL[string_idx].value} at such high of a line depth:" + f" {line.depth}" + ) + return + + # Check if StringMerger registered any custom splits. + custom_splits = self.pop_custom_splits(LL[string_idx].value) + # We use them ONLY if none of them would produce lines that exceed the + # line limit. + use_custom_breakpoints = bool( + custom_splits + and all(csplit.break_idx <= max_break_width for csplit in custom_splits) + ) + + # Temporary storage for the remaining chunk of the string line that + # can't fit onto the line currently being constructed. + rest_value = LL[string_idx].value + + def more_splits_should_be_made() -> bool: + """ + Returns: + True iff `rest_value` (the remaining string value from the last + split), should be split again. + """ + if use_custom_breakpoints: + return len(custom_splits) > 1 + else: + return str_width(rest_value) > max_last_string_column() + + string_line_results: list[Ok[Line]] = [] + while more_splits_should_be_made(): + if use_custom_breakpoints: + # Custom User Split (manual) + csplit = custom_splits.pop(0) + break_idx = csplit.break_idx + else: + # Algorithmic Split (automatic) + max_bidx = ( + count_chars_in_width(rest_value, max_break_width) + - string_op_leaves_length + ) + maybe_break_idx = self._get_break_idx(rest_value, max_bidx) + if maybe_break_idx is None: + # If we are unable to algorithmically determine a good split + # and this string has custom splits registered to it, we + # fall back to using them--which means we have to start + # over from the beginning. + if custom_splits: + rest_value = LL[string_idx].value + string_line_results = [] + first_string_line = True + use_custom_breakpoints = True + continue + + # Otherwise, we stop splitting here. + break + + break_idx = maybe_break_idx + + # --- Construct `next_value` + next_value = rest_value[:break_idx] + QUOTE + + # HACK: The following 'if' statement is a hack to fix the custom + # breakpoint index in the case of either: (a) substrings that were + # f-strings but will have the 'f' prefix removed OR (b) substrings + # that were not f-strings but will now become f-strings because of + # redundant use of the 'f' prefix (i.e. none of the substrings + # contain f-expressions but one or more of them had the 'f' prefix + # anyway; in which case, we will prepend 'f' to _all_ substrings). + # + # There is probably a better way to accomplish what is being done + # here... + # + # If this substring is an f-string, we _could_ remove the 'f' + # prefix, and the current custom split did NOT originally use a + # prefix... + if ( + use_custom_breakpoints + and not csplit.has_prefix + and ( + # `next_value == prefix + QUOTE` happens when the custom + # split is an empty string. 
+ next_value == prefix + QUOTE + or next_value != self._normalize_f_string(next_value, prefix) + ) + ): + # Then `csplit.break_idx` will be off by one after removing + # the 'f' prefix. + break_idx += 1 + next_value = rest_value[:break_idx] + QUOTE + + if drop_pointless_f_prefix: + next_value = self._normalize_f_string(next_value, prefix) + + # --- Construct `next_leaf` + next_leaf = Leaf(token.STRING, next_value) + insert_str_child(next_leaf) + self._maybe_normalize_string_quotes(next_leaf) + + # --- Construct `next_line` + next_line = line.clone() + maybe_append_string_operators(next_line) + next_line.append(next_leaf) + string_line_results.append(Ok(next_line)) + + rest_value = prefix + QUOTE + rest_value[break_idx:] + first_string_line = False + + yield from string_line_results + + if drop_pointless_f_prefix: + rest_value = self._normalize_f_string(rest_value, prefix) + + rest_leaf = Leaf(token.STRING, rest_value) + insert_str_child(rest_leaf) + + # NOTE: I could not find a test case that verifies that the following + # line is actually necessary, but it seems to be. Otherwise we risk + # not normalizing the last substring, right? + self._maybe_normalize_string_quotes(rest_leaf) + + last_line = line.clone() + maybe_append_string_operators(last_line) + + # If there are any leaves to the right of the target string... + if is_valid_index(string_idx + 1): + # We use `temp_value` here to determine how long the last line + # would be if we were to append all the leaves to the right of the + # target string to the last string line. + temp_value = rest_value + for leaf in LL[string_idx + 1 :]: + temp_value += str(leaf) + if leaf.type == token.LPAR: + break + + # Try to fit them all on the same line with the last substring... + if ( + str_width(temp_value) <= max_last_string_column() + or LL[string_idx + 1].type == token.COMMA + ): + last_line.append(rest_leaf) + append_leaves(last_line, line, LL[string_idx + 1 :]) + yield Ok(last_line) + # Otherwise, place the last substring on one line and everything + # else on a line below that... + else: + last_line.append(rest_leaf) + yield Ok(last_line) + + non_string_line = line.clone() + append_leaves(non_string_line, line, LL[string_idx + 1 :]) + yield Ok(non_string_line) + # Else the target string was the last leaf... + else: + last_line.append(rest_leaf) + last_line.comments = line.comments.copy() + yield Ok(last_line) + + def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]: + r""" + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an \N{...} expression (which is NOT + allowed). + """ + # True - the previous backslash was unescaped + # False - the previous backslash was escaped *or* there was no backslash + previous_was_unescaped_backslash = False + it = iter(enumerate(string)) + for idx, c in it: + if c == "\\": + previous_was_unescaped_backslash = not previous_was_unescaped_backslash + continue + if not previous_was_unescaped_backslash or c != "N": + previous_was_unescaped_backslash = False + continue + previous_was_unescaped_backslash = False + + begin = idx - 1 # the position of backslash before \N{...} + for idx, c in it: + if c == "}": + end = idx + break + else: + # malformed nameescape expression? + # should have been detected by AST parsing earlier... 
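+                # (Added note) \N{...} names a Unicode character, e.g.
+                # "\N{GREEK SMALL LETTER ALPHA}"; reaching this branch means
+                # no closing brace was found for the opening one.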
+ raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + yield begin, end + + def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an f-expression (which is NOT + allowed). + """ + if "f" not in get_string_prefix(string).lower(): + return + yield from iter_fexpr_spans(string) + + def _get_illegal_split_indices(self, string: str) -> set[Index]: + illegal_indices: set[Index] = set() + iterators = [ + self._iter_fexpr_slices(string), + self._iter_nameescape_slices(string), + ] + for it in iterators: + for begin, end in it: + illegal_indices.update(range(begin, end)) + return illegal_indices + + def _get_break_idx(self, string: str, max_break_idx: int) -> int | None: + """ + This method contains the algorithm that StringSplitter uses to + determine which character to split each string at. + + Args: + @string: The substring that we are attempting to split. + @max_break_idx: The ideal break index. We will return this value if it + meets all the necessary conditions. In the likely event that it + doesn't we will try to find the closest index BELOW @max_break_idx + that does. If that fails, we will expand our search by also + considering all valid indices ABOVE @max_break_idx. + + Pre-Conditions: + * assert_is_leaf_string(@string) + * 0 <= @max_break_idx < len(@string) + + Returns: + break_idx, if an index is able to be found that meets all of the + conditions listed in the 'Transformations' section of this classes' + docstring. + OR + None, otherwise. + """ + is_valid_index = is_valid_index_factory(string) + + assert is_valid_index(max_break_idx) + assert_is_leaf_string(string) + + _illegal_split_indices = self._get_illegal_split_indices(string) + + def breaks_unsplittable_expression(i: Index) -> bool: + """ + Returns: + True iff returning @i would result in the splitting of an + unsplittable expression (which is NOT allowed). + """ + return i in _illegal_split_indices + + def passes_all_checks(i: Index) -> bool: + """ + Returns: + True iff ALL of the conditions listed in the 'Transformations' + section of this classes' docstring would be met by returning @i. + """ + is_space = string[i] == " " + is_split_safe = is_valid_index(i - 1) and string[i - 1] in SPLIT_SAFE_CHARS + + is_not_escaped = True + j = i - 1 + while is_valid_index(j) and string[j] == "\\": + is_not_escaped = not is_not_escaped + j -= 1 + + is_big_enough = ( + len(string[i:]) >= self.MIN_SUBSTR_SIZE + and len(string[:i]) >= self.MIN_SUBSTR_SIZE + ) + return ( + (is_space or is_split_safe) + and is_not_escaped + and is_big_enough + and not breaks_unsplittable_expression(i) + ) + + # First, we check all indices BELOW @max_break_idx. + break_idx = max_break_idx + while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): + break_idx -= 1 + + if not passes_all_checks(break_idx): + # If that fails, we check all indices ABOVE @max_break_idx. + # + # If we are able to find a valid index here, the next line is going + # to be longer than the specified line length, but it's probably + # better than doing nothing at all. 
+            break_idx = max_break_idx + 1
+            while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
+                break_idx += 1
+
+            if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
+                return None
+
+        return break_idx
+
+    def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
+        if self.normalize_strings:
+            leaf.value = normalize_string_quotes(leaf.value)
+
+    def _normalize_f_string(self, string: str, prefix: str) -> str:
+        """
+        Pre-Conditions:
+            * assert_is_leaf_string(@string)
+
+        Returns:
+            * If @string is an f-string that contains no f-expressions, we
+              return a string identical to @string except that the 'f' prefix
+              has been stripped and all double braces (i.e. '{{' or '}}') have
+              been normalized (i.e. turned into '{' or '}').
+            OR
+            * Otherwise, we return @string.
+        """
+        assert_is_leaf_string(string)
+
+        if "f" in prefix and not fstring_contains_expr(string):
+            new_prefix = prefix.replace("f", "")
+
+            temp = string[len(prefix) :]
+            temp = re.sub(r"\{\{", "{", temp)
+            temp = re.sub(r"\}\}", "}", temp)
+            new_string = temp
+
+            return f"{new_prefix}{new_string}"
+        else:
+            return string
+
+    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
+        LL = list(leaves)
+
+        string_op_leaves = []
+        i = 0
+        while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
+            prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
+            string_op_leaves.append(prefix_leaf)
+            i += 1
+        return string_op_leaves
+
+
+class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
+    """
+    StringTransformer that wraps strings in parens and then splits at the LPAR.
+
+    Requirements:
+        All of the requirements listed in BaseStringSplitter's docstring in
+        addition to the requirements listed below:
+
+        * The line is a return/yield statement, which returns/yields a string.
+        OR
+        * The line is part of a ternary expression (e.g. `x = y if cond else
+          z`) such that the line starts with `else <string>`, where <string> is
+          some string.
+        OR
+        * The line is an assert statement, which ends with a string.
+        OR
+        * The line is an assignment statement (e.g. `x = <string>` or `x +=
+          <string>`) such that the variable is being assigned the value of some
+          string.
+        OR
+        * The line is a dictionary key assignment where some valid key is being
+          assigned the value of some string.
+        OR
+        * The line is a lambda expression and the value is a string.
+        OR
+        * The line starts with an "atom" string that prefers to be wrapped in
+          parens. It's preferred to be wrapped when it is an immediate child of
+          a list/set/tuple literal, AND the string is surrounded by commas (or
+          is the first/last child).
+
+    Transformations:
+        The chosen string is wrapped in parentheses and then split at the LPAR.
+
+        We then have one line which ends with an LPAR and another line that
+        starts with the chosen string. The latter line is then split again at
+        the RPAR. This results in the RPAR (and possibly a trailing comma)
+        being placed on its own line.
+
+        NOTE: If any leaves exist to the right of the chosen string (except
+        for a trailing comma, which would be placed after the RPAR), those
+        leaves are placed inside the parentheses. In effect, the chosen
+        string is not necessarily being "wrapped" by parentheses. We can,
+        however, count on the LPAR being placed directly before the chosen
+        string.
+
+        In other words, StringParenWrapper creates "atom" strings. These
+        can then be split again by StringSplitter, if necessary.
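+
+        Example (illustrative):
+            return "some long string value"
+        may be rewritten as:
+            return (
+                "some long string value"
+            )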
+ + Collaborations: + In the event that a string line split by StringParenWrapper is + changed such that it no longer needs to be given its own line, + StringParenWrapper relies on StringParenStripper to clean up the + parentheses it created. + + For "atom" strings that prefers to be wrapped in parens, it requires + StringSplitter to hold the split until the string is wrapped in parens. + """ + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + if line.leaves[-1].type in OPENING_BRACKETS: + return TErr( + "Cannot wrap parens around a line that ends in an opening bracket." + ) + + string_idx = ( + self._return_match(LL) + or self._else_match(LL) + or self._assert_match(LL) + or self._assign_match(LL) + or self._dict_or_lambda_match(LL) + or self._prefer_paren_wrap_match(LL) + ) + + if string_idx is not None: + string_value = line.leaves[string_idx].value + # If the string has neither spaces nor East Asian stops... + if not any( + char == " " or char in SPLIT_SAFE_CHARS for char in string_value + ): + # And will still violate the line length limit when split... + max_string_width = self.line_length - ((line.depth + 1) * 4) + if str_width(string_value) > max_string_width: + # And has no associated custom splits... + if not self.has_custom_splits(string_value): + # Then we should NOT put this string on its own line. + return TErr( + "We do not wrap long strings in parentheses when the" + " resultant line would still be over the specified line" + " length and can't be split further by StringSplitter." + ) + return Ok([string_idx]) + + return TErr("This line does not contain any non-atomic strings.") + + @staticmethod + def _return_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the return/yield statement + requirements listed in the 'Requirements' section of this classes' + docstring. + OR + None, otherwise. + """ + # If this line is a part of a return/yield statement and the first leaf + # contains either the "return" or "yield" keywords... + if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[ + 0 + ].value in ["return", "yield"]: + is_valid_index = is_valid_index_factory(LL) + + idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 + # The next visible leaf MUST contain a string... + if is_valid_index(idx) and LL[idx].type == token.STRING: + return idx + + return None + + @staticmethod + def _else_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the ternary expression + requirements listed in the 'Requirements' section of this classes' + docstring. + OR + None, otherwise. + """ + # If this line is a part of a ternary expression and the first leaf + # contains the "else" keyword... + if ( + parent_type(LL[0]) == syms.test + and LL[0].type == token.NAME + and LL[0].value == "else" + ): + is_valid_index = is_valid_index_factory(LL) + + idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 + # The next visible leaf MUST contain a string... + if is_valid_index(idx) and LL[idx].type == token.STRING: + return idx + + return None + + @staticmethod + def _assert_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. 
+ matched) string, if this line matches the assert statement + requirements listed in the 'Requirements' section of this classes' + docstring. + OR + None, otherwise. + """ + # If this line is a part of an assert statement and the first leaf + # contains the "assert" keyword... + if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": + is_valid_index = is_valid_index_factory(LL) + + for i, leaf in enumerate(LL): + # We MUST find a comma... + if leaf.type == token.COMMA: + idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 + + # That comma MUST be followed by a string... + if is_valid_index(idx) and LL[idx].type == token.STRING: + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # But no more leaves are allowed... + if not is_valid_index(idx): + return string_idx + + return None + + @staticmethod + def _assign_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the assignment statement + requirements listed in the 'Requirements' section of this classes' + docstring. + OR + None, otherwise. + """ + # If this line is a part of an expression statement or is a function + # argument AND the first leaf contains a variable name... + if ( + parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power] + and LL[0].type == token.NAME + ): + is_valid_index = is_valid_index_factory(LL) + + for i, leaf in enumerate(LL): + # We MUST find either an '=' or '+=' symbol... + if leaf.type in [token.EQUAL, token.PLUSEQUAL]: + idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 + + # That symbol MUST be followed by a string... + if is_valid_index(idx) and LL[idx].type == token.STRING: + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # The next leaf MAY be a comma iff this line is a part + # of a function argument... + if ( + parent_type(LL[0]) == syms.argument + and is_valid_index(idx) + and LL[idx].type == token.COMMA + ): + idx += 1 + + # But no more leaves are allowed... + if not is_valid_index(idx): + return string_idx + + return None + + @staticmethod + def _dict_or_lambda_match(LL: list[Leaf]) -> int | None: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the dictionary key assignment + statement or lambda expression requirements listed in the + 'Requirements' section of this classes' docstring. + OR + None, otherwise. + """ + # If this line is a part of a dictionary key assignment or lambda expression... + parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)] + if syms.dictsetmaker in parent_types or syms.lambdef in parent_types: + is_valid_index = is_valid_index_factory(LL) + + for i, leaf in enumerate(LL): + # We MUST find a colon, it can either be dict's or lambda's colon... + if leaf.type == token.COLON and i < len(LL) - 1: + idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 + + # That colon MUST be followed by a string... + if is_valid_index(idx) and LL[idx].type == token.STRING: + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by a comma... + if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... 
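+                    # (Illustrative) a line like `"key": "some string",`
+                    # matches, whereas `"key": "some string" + x,` does not,
+                    # because the `+ x` leaves extra leaves after the string.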
+ if not is_valid_index(idx): + return string_idx + + return None + + def do_transform( + self, line: Line, string_indices: list[int] + ) -> Iterator[TResult[Line]]: + LL = line.leaves + assert len(string_indices) == 1, ( + f"{self.__class__.__name__} should only find one match at a time, found" + f" {len(string_indices)}" + ) + string_idx = string_indices[0] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + comma_idx = -1 + ends_with_comma = False + if LL[comma_idx].type == token.COMMA: + ends_with_comma = True + + leaves_to_steal_comments_from = [LL[string_idx]] + if ends_with_comma: + leaves_to_steal_comments_from.append(LL[comma_idx]) + + # --- First Line + first_line = line.clone() + left_leaves = LL[:string_idx] + + # We have to remember to account for (possibly invisible) LPAR and RPAR + # leaves that already wrapped the target string. If these leaves do + # exist, we will replace them with our own LPAR and RPAR leaves. + old_parens_exist = False + if left_leaves and left_leaves[-1].type == token.LPAR: + old_parens_exist = True + leaves_to_steal_comments_from.append(left_leaves[-1]) + left_leaves.pop() + + append_leaves(first_line, line, left_leaves) + + lpar_leaf = Leaf(token.LPAR, "(") + if old_parens_exist: + replace_child(LL[string_idx - 1], lpar_leaf) + else: + insert_str_child(lpar_leaf) + first_line.append(lpar_leaf) + + # We throw inline comments that were originally to the right of the + # target string to the top line. They will now be shown to the right of + # the LPAR. + for leaf in leaves_to_steal_comments_from: + for comment_leaf in line.comments_after(leaf): + first_line.append(comment_leaf, preformatted=True) + + yield Ok(first_line) + + # --- Middle (String) Line + # We only need to yield one (possibly too long) string line, since the + # `StringSplitter` will break it down further if necessary. + string_value = LL[string_idx].value + string_line = Line( + mode=line.mode, + depth=line.depth + 1, + inside_brackets=True, + should_split_rhs=line.should_split_rhs, + magic_trailing_comma=line.magic_trailing_comma, + ) + string_leaf = Leaf(token.STRING, string_value) + insert_str_child(string_leaf) + string_line.append(string_leaf) + + old_rpar_leaf = None + if is_valid_index(string_idx + 1): + right_leaves = LL[string_idx + 1 :] + if ends_with_comma: + right_leaves.pop() + + if old_parens_exist: + assert right_leaves and right_leaves[-1].type == token.RPAR, ( + "Apparently, old parentheses do NOT exist?!" 
+ f" (left_leaves={left_leaves}, right_leaves={right_leaves})" + ) + old_rpar_leaf = right_leaves.pop() + elif right_leaves and right_leaves[-1].type == token.RPAR: + # Special case for lambda expressions as dict's value, e.g.: + # my_dict = { + # "key": lambda x: f"formatted: {x}", + # } + # After wrapping the dict's value with parentheses, the string is + # followed by a RPAR but its opening bracket is lambda's, not + # the string's: + # "key": (lambda x: f"formatted: {x}"), + opening_bracket = right_leaves[-1].opening_bracket + if opening_bracket is not None and opening_bracket in left_leaves: + index = left_leaves.index(opening_bracket) + if ( + 0 < index < len(left_leaves) - 1 + and left_leaves[index - 1].type == token.COLON + and left_leaves[index + 1].value == "lambda" + ): + right_leaves.pop() + + append_leaves(string_line, line, right_leaves) + + yield Ok(string_line) + + # --- Last Line + last_line = line.clone() + last_line.bracket_tracker = first_line.bracket_tracker + + new_rpar_leaf = Leaf(token.RPAR, ")") + if old_rpar_leaf is not None: + replace_child(old_rpar_leaf, new_rpar_leaf) + else: + insert_str_child(new_rpar_leaf) + last_line.append(new_rpar_leaf) + + # If the target string ended with a comma, we place this comma to the + # right of the RPAR on the last line. + if ends_with_comma: + comma_leaf = Leaf(token.COMMA, ",") + replace_child(LL[comma_idx], comma_leaf) + last_line.append(comma_leaf) + + yield Ok(last_line) + + +class StringParser: + """ + A state machine that aids in parsing a string's "trailer", which can be + either non-existent, an old-style formatting sequence (e.g. `% varX` or `% + (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, + varY)`). + + NOTE: A new StringParser object MUST be instantiated for each string + trailer we need to parse. + + Examples: + We shall assume that `line` equals the `Line` object that corresponds + to the following line of python code: + ``` + x = "Some {}.".format("String") + some_other_string + ``` + + Furthermore, we will assume that `string_idx` is some index such that: + ``` + assert line.leaves[string_idx].value == "Some {}." + ``` + + The following code snippet then holds: + ``` + string_parser = StringParser() + idx = string_parser.parse(line.leaves, string_idx) + assert line.leaves[idx].type == token.PLUS + ``` + """ + + DEFAULT_TOKEN: Final = 20210605 + + # String Parser States + START: Final = 1 + DOT: Final = 2 + NAME: Final = 3 + PERCENT: Final = 4 + SINGLE_FMT_ARG: Final = 5 + LPAR: Final = 6 + RPAR: Final = 7 + DONE: Final = 8 + + # Lookup Table for Next State + _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = { + # A string trailer may start with '.' OR '%'. + (START, token.DOT): DOT, + (START, token.PERCENT): PERCENT, + (START, DEFAULT_TOKEN): DONE, + # A '.' MUST be followed by an attribute or method name. + (DOT, token.NAME): NAME, + # A method name MUST be followed by an '(', whereas an attribute name + # is the last symbol in the string trailer. + (NAME, token.LPAR): LPAR, + (NAME, DEFAULT_TOKEN): DONE, + # A '%' symbol can be followed by an '(' or a single argument (e.g. a + # string or variable name). + (PERCENT, token.LPAR): LPAR, + (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, + # If a '%' symbol is followed by a single argument, that argument is + # the last leaf in the string trailer. + (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, + # If present, a ')' symbol is the last symbol in a string trailer. 
+ # (NOTE: LPARS and nested RPARS are not included in this lookup table, + # since they are treated as a special case by the parsing logic in this + # classes' implementation.) + (RPAR, DEFAULT_TOKEN): DONE, + } + + def __init__(self) -> None: + self._state = self.START + self._unmatched_lpars = 0 + + def parse(self, leaves: list[Leaf], string_idx: int) -> int: + """ + Pre-conditions: + * @leaves[@string_idx].type == token.STRING + + Returns: + The index directly after the last leaf which is a part of the string + trailer, if a "trailer" exists. + OR + @string_idx + 1, if no string "trailer" exists. + """ + assert leaves[string_idx].type == token.STRING + + idx = string_idx + 1 + while idx < len(leaves) and self._next_state(leaves[idx]): + idx += 1 + return idx + + def _next_state(self, leaf: Leaf) -> bool: + """ + Pre-conditions: + * On the first call to this function, @leaf MUST be the leaf that + was directly after the string leaf in question (e.g. if our target + string is `line.leaves[i]` then the first call to this method must + be `line.leaves[i + 1]`). + * On the next call to this function, the leaf parameter passed in + MUST be the leaf directly following @leaf. + + Returns: + True iff @leaf is a part of the string's trailer. + """ + # We ignore empty LPAR or RPAR leaves. + if is_empty_par(leaf): + return True + + next_token = leaf.type + if next_token == token.LPAR: + self._unmatched_lpars += 1 + + current_state = self._state + + # The LPAR parser state is a special case. We will return True until we + # find the matching RPAR token. + if current_state == self.LPAR: + if next_token == token.RPAR: + self._unmatched_lpars -= 1 + if self._unmatched_lpars == 0: + self._state = self.RPAR + # Otherwise, we use a lookup table to determine the next state. + else: + # If the lookup table matches the current state to the next + # token, we use the lookup table. + if (current_state, next_token) in self._goto: + self._state = self._goto[current_state, next_token] + else: + # Otherwise, we check if a the current state was assigned a + # default. + if (current_state, self.DEFAULT_TOKEN) in self._goto: + self._state = self._goto[current_state, self.DEFAULT_TOKEN] + # If no default has been assigned, then this parser has a logic + # error. + else: + raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + + if self._state == self.DONE: + return False + + return True + + +def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: + """ + Factory for a convenience function that is used to orphan @string_leaf + and then insert multiple new leaves into the same part of the node + structure that @string_leaf had originally occupied. + + Examples: + Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = + string_leaf.parent`. Assume the node `N` has the following + original structure: + + Node( + expr_stmt, [ + Leaf(NAME, 'x'), + Leaf(EQUAL, '='), + Leaf(STRING, '"foo"'), + ] + ) + + We then run the code snippet shown below. 
+ ``` + insert_str_child = insert_str_child_factory(string_leaf) + + lpar = Leaf(token.LPAR, '(') + insert_str_child(lpar) + + bar = Leaf(token.STRING, '"bar"') + insert_str_child(bar) + + rpar = Leaf(token.RPAR, ')') + insert_str_child(rpar) + ``` + + After which point, it follows that `string_leaf.parent is None` and + the node `N` now has the following structure: + + Node( + expr_stmt, [ + Leaf(NAME, 'x'), + Leaf(EQUAL, '='), + Leaf(LPAR, '('), + Leaf(STRING, '"bar"'), + Leaf(RPAR, ')'), + ] + ) + """ + string_parent = string_leaf.parent + string_child_idx = string_leaf.remove() + + def insert_str_child(child: LN) -> None: + nonlocal string_child_idx + + assert string_parent is not None + assert string_child_idx is not None + + string_parent.insert_child(string_child_idx, child) + string_child_idx += 1 + + return insert_str_child + + +def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: + """ + Examples: + ``` + my_list = [1, 2, 3] + + is_valid_index = is_valid_index_factory(my_list) + + assert is_valid_index(0) + assert is_valid_index(2) + + assert not is_valid_index(3) + assert not is_valid_index(-1) + ``` + """ + + def is_valid_index(idx: int) -> bool: + """ + Returns: + True iff @idx is positive AND seq[@idx] does NOT raise an + IndexError. + """ + return 0 <= idx < len(seq) + + return is_valid_index diff --git a/py311/lib/python3.11/site-packages/botocore/__init__.py b/py311/lib/python3.11/site-packages/botocore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6fa3ffc5bb2382fec47ec155e810c960eeefdc --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/__init__.py @@ -0,0 +1,215 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import logging +import os +import re +from logging import NullHandler + +__version__ = '1.42.27' + + +# Configure default logger to do nothing +log = logging.getLogger('botocore') +log.addHandler(NullHandler()) + +_INITIALIZERS = [] + +_first_cap_regex = re.compile('(.)([A-Z][a-z]+)') +_end_cap_regex = re.compile('([a-z0-9])([A-Z])') +# The regex below handles the special case where some acronym +# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs. +_special_case_transform = re.compile('[A-Z]{2,}s$') +# Prepopulate the cache with special cases that don't match +# our regular transformation. 
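+# (Illustrative, based on xform_name() defined below):
+#   xform_name('DescribeInstances') -> 'describe_instances'
+#   xform_name('ListWebACLs')       -> 'list_web_acls'
+# The entries below pin operation names (e.g. iSCSI, PartiQL, WhatsApp, HITs)
+# whose acronyms the regexes above would otherwise split incorrectly.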
+_xform_cache = { + ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume', + ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume', + ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes', + ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes', + ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes', + ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes', + ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume', + ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume', + ('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type', + ('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type', + ('ExecutePartiQLStatement', '_'): 'execute_partiql_statement', + ('ExecutePartiQLStatement', '-'): 'execute-partiql-statement', + ('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction', + ('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction', + ('ExecutePartiQLBatch', '_'): 'execute_partiql_batch', + ('ExecutePartiQLBatch', '-'): 'execute-partiql-batch', + ( + 'AssociateWhatsAppBusinessAccount', + '_', + ): 'associate_whatsapp_business_account', + ( + 'AssociateWhatsAppBusinessAccount', + '-', + ): 'associate-whatsapp-business-account', + ('CreateWhatsAppMessageTemplate', '_'): 'create_whatsapp_message_template', + ('CreateWhatsAppMessageTemplate', '-'): 'create-whatsapp-message-template', + ( + 'CreateWhatsAppMessageTemplateFromLibrary', + '_', + ): 'create_whatsapp_message_template_from_library', + ( + 'CreateWhatsAppMessageTemplateFromLibrary', + '-', + ): 'create-whatsapp-message-template-from-library', + ( + 'CreateWhatsAppMessageTemplateMedia', + '_', + ): 'create_whatsapp_message_template_media', + ( + 'CreateWhatsAppMessageTemplateMedia', + '-', + ): 'create-whatsapp-message-template-media', + ('DeleteWhatsAppMessageMedia', '_'): 'delete_whatsapp_message_media', + ('DeleteWhatsAppMessageMedia', '-'): 'delete-whatsapp-message-media', + ('DeleteWhatsAppMessageTemplate', '_'): 'delete_whatsapp_message_template', + ('DeleteWhatsAppMessageTemplate', '-'): 'delete-whatsapp-message-template', + ( + 'DisassociateWhatsAppBusinessAccount', + '_', + ): 'disassociate_whatsapp_business_account', + ( + 'DisassociateWhatsAppBusinessAccount', + '-', + ): 'disassociate-whatsapp-business-account', + ( + 'GetLinkedWhatsAppBusinessAccount', + '_', + ): 'get_linked_whatsapp_business_account', + ( + 'GetLinkedWhatsAppBusinessAccount', + '-', + ): 'get-linked-whatsapp-business-account', + ( + 'GetLinkedWhatsAppBusinessAccountPhoneNumber', + '_', + ): 'get_linked_whatsapp_business_account_phone_number', + ( + 'GetLinkedWhatsAppBusinessAccountPhoneNumber', + '-', + ): 'get-linked-whatsapp-business-account-phone-number', + ('GetWhatsAppMessageMedia', '_'): 'get_whatsapp_message_media', + ('GetWhatsAppMessageMedia', '-'): 'get-whatsapp-message-media', + ('GetWhatsAppMessageTemplate', '_'): 'get_whatsapp_message_template', + ('GetWhatsAppMessageTemplate', '-'): 'get-whatsapp-message-template', + ( + 'ListLinkedWhatsAppBusinessAccounts', + '_', + ): 'list_linked_whatsapp_business_accounts', + ( + 'ListLinkedWhatsAppBusinessAccounts', + '-', + ): 'list-linked-whatsapp-business-accounts', + ('ListWhatsAppMessageTemplates', '_'): 'list_whatsapp_message_templates', + ('ListWhatsAppMessageTemplates', '-'): 'list-whatsapp-message-templates', + ('ListWhatsAppTemplateLibrary', '_'): 'list_whatsapp_template_library', + ('ListWhatsAppTemplateLibrary', '-'): 
'list-whatsapp-template-library', + ('PostWhatsAppMessageMedia', '_'): 'post_whatsapp_message_media', + ('PostWhatsAppMessageMedia', '-'): 'post-whatsapp-message-media', + ( + 'PutWhatsAppBusinessAccountEventDestinations', + '_', + ): 'put_whatsapp_business_account_event_destinations', + ( + 'PutWhatsAppBusinessAccountEventDestinations', + '-', + ): 'put-whatsapp-business-account-event-destinations', + ('SendWhatsAppMessage', '_'): 'send_whatsapp_message', + ('SendWhatsAppMessage', '-'): 'send-whatsapp-message', + ('UpdateWhatsAppMessageTemplate', '_'): 'update_whatsapp_message_template', + ('UpdateWhatsAppMessageTemplate', '-'): 'update-whatsapp-message-template', +} +ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double') + +BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__)) + + +# Used to specify anonymous (unsigned) request signature +class UNSIGNED: + def __copy__(self): + return self + + def __deepcopy__(self, memodict): + return self + + +UNSIGNED = UNSIGNED() + + +def xform_name(name, sep='_', _xform_cache=_xform_cache): + """Convert camel case to a "pythonic" name. + + If the name contains the ``sep`` character, then it is + returned unchanged. + + """ + if sep in name: + # If the sep is in the name, assume that it's already + # transformed and return the string unchanged. + return name + key = (name, sep) + if key not in _xform_cache: + if _special_case_transform.search(name) is not None: + is_special = _special_case_transform.search(name) + matched = is_special.group() + # Replace something like ARNs, ACLs with _arns, _acls. + name = f"{name[: -len(matched)]}{sep}{matched.lower()}" + s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name) + transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower() + _xform_cache[key] = transformed + return _xform_cache[key] + + +def register_initializer(callback): + """Register an initializer function for session creation. + + This initializer function will be invoked whenever a new + `botocore.session.Session` is instantiated. + + :type callback: callable + :param callback: A callable that accepts a single argument + of type `botocore.session.Session`. + + """ + _INITIALIZERS.append(callback) + + +def unregister_initializer(callback): + """Unregister an initializer function. + + :type callback: callable + :param callback: A callable that was previously registered + with `botocore.register_initializer`. + + :raises ValueError: If a callback is provided that is not currently + registered as an initializer. + + """ + _INITIALIZERS.remove(callback) + + +def invoke_initializers(session): + """Invoke all initializers for a session. + + :type session: botocore.session.Session + :param session: The session to initialize. 
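+
+    Example (illustrative; ``register_initializer`` is defined above)::
+
+        def tag_session(session):
+            session.user_agent_extra = 'my-app/1.0'
+
+        register_initializer(tag_session)
+        # Every botocore.session.Session created afterwards is passed
+        # to tag_session via this function.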
+
+    """
+    for initializer in _INITIALIZERS:
+        initializer(session)
diff --git a/py311/lib/python3.11/site-packages/botocore/args.py b/py311/lib/python3.11/site-packages/botocore/args.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a76e879ef007aed47dc36295c8aff3db03e9a9a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/args.py
@@ -0,0 +1,998 @@
+# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+"""Internal module to help with normalizing botocore client args.
+
+This module (and all function/classes within this module) should be
+considered internal, and *not* a public API.
+ +""" + +import copy +import logging +import socket + +import botocore.exceptions +import botocore.parsers +import botocore.serialize +from botocore.config import Config +from botocore.endpoint import EndpointCreator +from botocore.regions import EndpointResolverBuiltins as EPRBuiltins +from botocore.regions import EndpointRulesetResolver +from botocore.signers import RequestSigner +from botocore.useragent import UserAgentString, register_feature_id +from botocore.utils import ( + PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, # noqa: F401 + ensure_boolean, + is_s3_accelerate_url, +) + +logger = logging.getLogger(__name__) + + +VALID_REGIONAL_ENDPOINTS_CONFIG = [ + 'legacy', + 'regional', +] +LEGACY_GLOBAL_STS_REGIONS = [ + 'ap-northeast-1', + 'ap-south-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'aws-global', + 'ca-central-1', + 'eu-central-1', + 'eu-north-1', + 'eu-west-1', + 'eu-west-2', + 'eu-west-3', + 'sa-east-1', + 'us-east-1', + 'us-east-2', + 'us-west-1', + 'us-west-2', +] +# Maximum allowed length of the ``user_agent_appid`` config field. Longer +# values result in a warning-level log message. +USERAGENT_APPID_MAXLEN = 50 + +VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG = ( + "when_supported", + "when_required", +) +VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG = ( + "when_supported", + "when_required", +) + + +VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG = ( + 'preferred', + 'disabled', + 'required', +) + + +class ClientArgsCreator: + def __init__( + self, + event_emitter, + user_agent, + response_parser_factory, + loader, + exceptions_factory, + config_store, + user_agent_creator=None, + ): + self._event_emitter = event_emitter + self._response_parser_factory = response_parser_factory + self._loader = loader + self._exceptions_factory = exceptions_factory + self._config_store = config_store + if user_agent_creator is None: + self._session_ua_creator = UserAgentString.from_environment() + else: + self._session_ua_creator = user_agent_creator + + def get_client_args( + self, + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + auth_token=None, + endpoints_ruleset_data=None, + partition_data=None, + ): + final_args = self.compute_client_args( + service_model, + client_config, + endpoint_bridge, + region_name, + endpoint_url, + is_secure, + scoped_config, + ) + + service_name = final_args['service_name'] # noqa + parameter_validation = final_args['parameter_validation'] + endpoint_config = final_args['endpoint_config'] + protocol = final_args['protocol'] + config_kwargs = final_args['config_kwargs'] + s3_config = final_args['s3_config'] + partition = endpoint_config['metadata'].get('partition', None) + socket_options = final_args['socket_options'] + configured_endpoint_url = final_args['configured_endpoint_url'] + signing_region = endpoint_config['signing_region'] + endpoint_region_name = endpoint_config['region_name'] + account_id_endpoint_mode = config_kwargs['account_id_endpoint_mode'] + + event_emitter = copy.copy(self._event_emitter) + signer = RequestSigner( + service_model.service_id, + signing_region, + endpoint_config['signing_name'], + endpoint_config['signature_version'], + credentials, + event_emitter, + auth_token, + ) + + config_kwargs['s3'] = s3_config + new_config = Config(**config_kwargs) + endpoint_creator = EndpointCreator(event_emitter) + + endpoint = endpoint_creator.create_endpoint( + service_model, + region_name=endpoint_region_name, + endpoint_url=endpoint_config['endpoint_url'], + 
verify=verify,
+            response_parser_factory=self._response_parser_factory,
+            max_pool_connections=new_config.max_pool_connections,
+            proxies=new_config.proxies,
+            timeout=(new_config.connect_timeout, new_config.read_timeout),
+            socket_options=socket_options,
+            client_cert=new_config.client_cert,
+            proxies_config=new_config.proxies_config,
+        )
+
+        # Emit an event to allow service-specific or customer customization
+        # of the serializer kwargs.
+        event_name = f'creating-serializer.{service_name}'
+        serializer_kwargs = {
+            'timestamp_precision': botocore.serialize.TIMESTAMP_PRECISION_DEFAULT
+        }
+        event_emitter.emit(
+            event_name,
+            protocol_name=protocol,
+            service_model=service_model,
+            serializer_kwargs=serializer_kwargs,
+        )
+
+        serializer = botocore.serialize.create_serializer(
+            protocol,
+            parameter_validation,
+            timestamp_precision=serializer_kwargs['timestamp_precision'],
+        )
+        response_parser = botocore.parsers.create_parser(protocol)
+
+        ruleset_resolver = self._build_endpoint_resolver(
+            endpoints_ruleset_data,
+            partition_data,
+            client_config,
+            service_model,
+            endpoint_region_name,
+            region_name,
+            configured_endpoint_url,
+            endpoint,
+            is_secure,
+            endpoint_bridge,
+            event_emitter,
+            credentials,
+            account_id_endpoint_mode,
+        )
+
+        # Copy the session's user agent factory and add the client
+        # configuration.
+        client_ua_creator = self._session_ua_creator.with_client_config(
+            new_config
+        )
+        supplied_ua = client_config.user_agent if client_config else None
+        new_config._supplied_user_agent = supplied_ua
+
+        return {
+            'serializer': serializer,
+            'endpoint': endpoint,
+            'response_parser': response_parser,
+            'event_emitter': event_emitter,
+            'request_signer': signer,
+            'service_model': service_model,
+            'loader': self._loader,
+            'client_config': new_config,
+            'partition': partition,
+            'exceptions_factory': self._exceptions_factory,
+            'endpoint_ruleset_resolver': ruleset_resolver,
+            'user_agent_creator': client_ua_creator,
+        }
+
+    def compute_client_args(
+        self,
+        service_model,
+        client_config,
+        endpoint_bridge,
+        region_name,
+        endpoint_url,
+        is_secure,
+        scoped_config,
+    ):
+        service_name = service_model.endpoint_prefix
+        protocol = service_model.resolved_protocol
+        parameter_validation = True
+        if client_config and not client_config.parameter_validation:
+            parameter_validation = False
+        elif scoped_config:
+            raw_value = scoped_config.get('parameter_validation')
+            if raw_value is not None:
+                parameter_validation = ensure_boolean(raw_value)
+
+        s3_config = self.compute_s3_config(client_config)
+
+        configured_endpoint_url = self._compute_configured_endpoint_url(
+            client_config=client_config,
+            endpoint_url=endpoint_url,
+        )
+        if configured_endpoint_url is not None:
+            register_feature_id('ENDPOINT_OVERRIDE')
+
+        endpoint_config = self._compute_endpoint_config(
+            service_name=service_name,
+            region_name=region_name,
+            endpoint_url=configured_endpoint_url,
+            is_secure=is_secure,
+            endpoint_bridge=endpoint_bridge,
+            s3_config=s3_config,
+        )
+        endpoint_variant_tags = endpoint_config['metadata'].get('tags', [])
+
+        # Some third-party libraries expect the final user-agent string in
+        # ``client.meta.config.user_agent``. To maintain backwards
+        # compatibility, the preliminary user-agent string (before any Config
+        # object modifications and without request-specific user-agent
+        # components) is stored in the new Config object's ``user_agent``
+        # property but not used by Botocore itself.
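+        # Illustration only (the exact fields vary by environment): the
+        # stored preliminary string looks something like
+        #     client.meta.config.user_agent
+        #     'Botocore/1.x Python/3.11.9 Linux/6.1 ...'
+        # whereas the string actually sent on the wire gains additional,
+        # request-specific components.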
+ preliminary_ua_string = self._session_ua_creator.with_client_config( + client_config + ).to_string() + # Create a new client config to be passed to the client based + # on the final values. We do not want the user to be able + # to try to modify an existing client with a client config. + config_kwargs = dict( + region_name=endpoint_config['region_name'], + signature_version=endpoint_config['signature_version'], + user_agent=preliminary_ua_string, + ) + if 'dualstack' in endpoint_variant_tags: + config_kwargs.update(use_dualstack_endpoint=True) + if 'fips' in endpoint_variant_tags: + config_kwargs.update(use_fips_endpoint=True) + if client_config is not None: + config_kwargs.update( + connect_timeout=client_config.connect_timeout, + read_timeout=client_config.read_timeout, + max_pool_connections=client_config.max_pool_connections, + proxies=client_config.proxies, + proxies_config=client_config.proxies_config, + retries=client_config.retries, + client_cert=client_config.client_cert, + inject_host_prefix=client_config.inject_host_prefix, + tcp_keepalive=client_config.tcp_keepalive, + user_agent_extra=client_config.user_agent_extra, + user_agent_appid=client_config.user_agent_appid, + request_min_compression_size_bytes=( + client_config.request_min_compression_size_bytes + ), + disable_request_compression=( + client_config.disable_request_compression + ), + client_context_params=client_config.client_context_params, + sigv4a_signing_region_set=( + client_config.sigv4a_signing_region_set + ), + request_checksum_calculation=( + client_config.request_checksum_calculation + ), + response_checksum_validation=( + client_config.response_checksum_validation + ), + account_id_endpoint_mode=client_config.account_id_endpoint_mode, + auth_scheme_preference=client_config.auth_scheme_preference, + ) + self._compute_retry_config(config_kwargs) + self._compute_connect_timeout(config_kwargs) + self._compute_user_agent_appid_config(config_kwargs) + self._compute_request_compression_config(config_kwargs) + self._compute_sigv4a_signing_region_set_config(config_kwargs) + self._compute_checksum_config(config_kwargs) + self._compute_account_id_endpoint_mode_config(config_kwargs) + self._compute_inject_host_prefix(client_config, config_kwargs) + self._compute_auth_scheme_preference_config( + client_config, config_kwargs + ) + self._compute_signature_version_config(client_config, config_kwargs) + s3_config = self.compute_s3_config(client_config) + + is_s3_service = self._is_s3_service(service_name) + + if is_s3_service and 'dualstack' in endpoint_variant_tags: + if s3_config is None: + s3_config = {} + s3_config['use_dualstack_endpoint'] = True + + return { + 'service_name': service_name, + 'parameter_validation': parameter_validation, + 'configured_endpoint_url': configured_endpoint_url, + 'endpoint_config': endpoint_config, + 'protocol': protocol, + 'config_kwargs': config_kwargs, + 's3_config': s3_config, + 'socket_options': self._compute_socket_options( + scoped_config, client_config + ), + } + + def _compute_inject_host_prefix(self, client_config, config_kwargs): + # In the cases that a Config object was not provided, or the private value + # remained UNSET, we should resolve the value from the config store. 
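+        # Resolution order, as implemented below:
+        #   1. An explicit Config(inject_host_prefix=...) wins.
+        #   2. Otherwise, the config store's
+        #      'disable_host_prefix_injection' (env var / shared config
+        #      file) is consulted and negated.
+        #   3. Otherwise, the default is True (prefixes are injected).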
+        if (
+            client_config is None
+            or client_config._inject_host_prefix == 'UNSET'
+        ):
+            configured_disable_host_prefix_injection = (
+                self._config_store.get_config_variable(
+                    'disable_host_prefix_injection'
+                )
+            )
+            if configured_disable_host_prefix_injection is not None:
+                config_kwargs[
+                    'inject_host_prefix'
+                ] = not configured_disable_host_prefix_injection
+            else:
+                config_kwargs['inject_host_prefix'] = True
+
+    def _compute_configured_endpoint_url(self, client_config, endpoint_url):
+        if endpoint_url is not None:
+            return endpoint_url
+
+        if self._ignore_configured_endpoint_urls(client_config):
+            logger.debug("Ignoring configured endpoint URLs.")
+            return endpoint_url
+
+        return self._config_store.get_config_variable('endpoint_url')
+
+    def _ignore_configured_endpoint_urls(self, client_config):
+        if (
+            client_config
+            and client_config.ignore_configured_endpoint_urls is not None
+        ):
+            return client_config.ignore_configured_endpoint_urls
+
+        return self._config_store.get_config_variable(
+            'ignore_configured_endpoint_urls'
+        )
+
+    def compute_s3_config(self, client_config):
+        s3_configuration = self._config_store.get_config_variable('s3')
+
+        # Next, specific client config values take precedence over
+        # specific values in the scoped config.
+        if client_config is not None:
+            if client_config.s3 is not None:
+                if s3_configuration is None:
+                    s3_configuration = client_config.s3
+                else:
+                    # The current s3_configuration dictionary may come from
+                    # a source that should only be read from, so to be safe
+                    # we make a copy of it to modify before it actually
+                    # gets updated.
+                    s3_configuration = s3_configuration.copy()
+                    s3_configuration.update(client_config.s3)
+
+        return s3_configuration
+
+    def _is_s3_service(self, service_name):
+        """Whether the service is S3 or S3 Control.
+
+        Note that throughout this class, service_name refers to the endpoint
+        prefix, not the folder name of the service in botocore/data. For
+        S3 Control, the folder name is 's3control' but the endpoint prefix is
+        's3-control'.
+        """
+        return service_name in ['s3', 's3-control']
+
+    def _compute_endpoint_config(
+        self,
+        service_name,
+        region_name,
+        endpoint_url,
+        is_secure,
+        endpoint_bridge,
+        s3_config,
+    ):
+        resolve_endpoint_kwargs = {
+            'service_name': service_name,
+            'region_name': region_name,
+            'endpoint_url': endpoint_url,
+            'is_secure': is_secure,
+            'endpoint_bridge': endpoint_bridge,
+        }
+        if service_name == 's3':
+            return self._compute_s3_endpoint_config(
+                s3_config=s3_config, **resolve_endpoint_kwargs
+            )
+        if service_name == 'sts':
+            return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
+        return self._resolve_endpoint(**resolve_endpoint_kwargs)
+
+    def _compute_s3_endpoint_config(
+        self, s3_config, **resolve_endpoint_kwargs
+    ):
+        force_s3_global = self._should_force_s3_global(
+            resolve_endpoint_kwargs['region_name'], s3_config
+        )
+        if force_s3_global:
+            resolve_endpoint_kwargs['region_name'] = None
+        endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
+        self._set_region_if_custom_s3_endpoint(
+            endpoint_config, resolve_endpoint_kwargs['endpoint_bridge']
+        )
+        # For backwards compatibility reasons, we want to make sure the
+        # client.meta.region_name will remain us-east-1 if we forced the
+        # endpoint to be the global region. Specifically, if this value
+        # changes to aws-global, it breaks logic where a user is checking
+        # for us-east-1 as the global endpoint such as in creating buckets.
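+        # e.g. with the default 'legacy' us_east_1_regional_endpoint setting
+        # and region 'us-east-1', the resolver hands back the 'aws-global'
+        # pseudo-region; the rewrite below keeps client.meta.region_name as
+        # 'us-east-1' for such clients.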
+ if force_s3_global and endpoint_config['region_name'] == 'aws-global': + endpoint_config['region_name'] = 'us-east-1' + return endpoint_config + + def _should_force_s3_global(self, region_name, s3_config): + s3_regional_config = 'legacy' + if s3_config and 'us_east_1_regional_endpoint' in s3_config: + s3_regional_config = s3_config['us_east_1_regional_endpoint'] + self._validate_s3_regional_config(s3_regional_config) + + is_global_region = region_name in ('us-east-1', None) + return s3_regional_config == 'legacy' and is_global_region + + def _validate_s3_regional_config(self, config_val): + if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG: + raise botocore.exceptions.InvalidS3UsEast1RegionalEndpointConfigError( + s3_us_east_1_regional_endpoint_config=config_val + ) + + def _set_region_if_custom_s3_endpoint( + self, endpoint_config, endpoint_bridge + ): + # If a user is providing a custom URL, the endpoint resolver will + # refuse to infer a signing region. If we want to default to s3v4, + # we have to account for this. + if ( + endpoint_config['signing_region'] is None + and endpoint_config['region_name'] is None + ): + endpoint = endpoint_bridge.resolve('s3') + endpoint_config['signing_region'] = endpoint['signing_region'] + endpoint_config['region_name'] = endpoint['region_name'] + + def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs): + endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs) + if self._should_set_global_sts_endpoint( + resolve_endpoint_kwargs['region_name'], + resolve_endpoint_kwargs['endpoint_url'], + endpoint_config, + ): + self._set_global_sts_endpoint( + endpoint_config, resolve_endpoint_kwargs['is_secure'] + ) + return endpoint_config + + def _should_set_global_sts_endpoint( + self, region_name, endpoint_url, endpoint_config + ): + has_variant_tags = endpoint_config and endpoint_config.get( + 'metadata', {} + ).get('tags') + if endpoint_url or has_variant_tags: + return False + return ( + self._get_sts_regional_endpoints_config() == 'legacy' + and region_name in LEGACY_GLOBAL_STS_REGIONS + ) + + def _get_sts_regional_endpoints_config(self): + sts_regional_endpoints_config = self._config_store.get_config_variable( + 'sts_regional_endpoints' + ) + if not sts_regional_endpoints_config: + sts_regional_endpoints_config = 'regional' + if ( + sts_regional_endpoints_config + not in VALID_REGIONAL_ENDPOINTS_CONFIG + ): + raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError( + sts_regional_endpoints_config=sts_regional_endpoints_config + ) + return sts_regional_endpoints_config + + def _set_global_sts_endpoint(self, endpoint_config, is_secure): + scheme = 'https' if is_secure else 'http' + endpoint_config['endpoint_url'] = f'{scheme}://sts.amazonaws.com' + endpoint_config['signing_region'] = 'us-east-1' + + def _resolve_endpoint( + self, + service_name, + region_name, + endpoint_url, + is_secure, + endpoint_bridge, + ): + return endpoint_bridge.resolve( + service_name, region_name, endpoint_url, is_secure + ) + + def _compute_socket_options(self, scoped_config, client_config=None): + # This disables Nagle's algorithm and is the default socket options + # in urllib3. + socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] + client_keepalive = client_config and client_config.tcp_keepalive + scoped_keepalive = scoped_config and self._ensure_boolean( + scoped_config.get("tcp_keepalive", False) + ) + # Enables TCP Keepalive if specified in client config object or shared config file. 
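+        # With keepalive enabled from either source, the resulting list is:
+        #     [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
+        #      (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]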
+        if client_keepalive or scoped_keepalive:
+            socket_options.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
+        return socket_options
+
+    def _compute_retry_config(self, config_kwargs):
+        self._compute_retry_max_attempts(config_kwargs)
+        self._compute_retry_mode(config_kwargs)
+
+    def _compute_retry_max_attempts(self, config_kwargs):
+        # There's a pre-existing max_attempts client config value that
+        # actually means max *retry* attempts. There's also a `max_attempts`
+        # we pull from the config store that means *total attempts*, which
+        # includes the initial request. We can't change what `max_attempts`
+        # means in client config so we try to normalize everything to a new
+        # "total_max_attempts" variable. We ensure that after this, the only
+        # configuration for "max attempts" is the 'total_max_attempts' key.
+        # An explicitly provided max_attempts in the client config
+        # overrides everything.
+        retries = config_kwargs.get('retries')
+        if retries is not None:
+            if 'total_max_attempts' in retries:
+                retries.pop('max_attempts', None)
+                return
+            if 'max_attempts' in retries:
+                value = retries.pop('max_attempts')
+                # client config max_attempts means total retries so we
+                # have to add one for 'total_max_attempts' to account
+                # for the initial request.
+                retries['total_max_attempts'] = value + 1
+                return
+        # Otherwise we'll check the config store, which checks env vars,
+        # config files, etc. There is no default value for max_attempts,
+        # so if this returns None we don't set a default value here.
+        max_attempts = self._config_store.get_config_variable('max_attempts')
+        if max_attempts is not None:
+            if retries is None:
+                retries = {}
+                config_kwargs['retries'] = retries
+            retries['total_max_attempts'] = max_attempts
+
+    def _compute_retry_mode(self, config_kwargs):
+        retries = config_kwargs.get('retries')
+        if retries is None:
+            retries = {}
+            config_kwargs['retries'] = retries
+        elif 'mode' in retries:
+            # If there's a retry mode explicitly set in the client config,
+            # that overrides everything.
+            return
+        retry_mode = self._config_store.get_config_variable('retry_mode')
+        if retry_mode is None:
+            retry_mode = 'legacy'
+        retries['mode'] = retry_mode
+
+    def _compute_connect_timeout(self, config_kwargs):
+        # Checking if connect_timeout is set on the client config.
+        # If it is not, we check the config_store in case a
+        # non-legacy default mode has been configured.
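+        # i.e. an explicit Config(connect_timeout=...) always wins; only
+        # when the client config leaves it unset do we fall back to the
+        # config store (which is how non-legacy defaults modes supply
+        # their own timeout).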
+ connect_timeout = config_kwargs.get('connect_timeout') + if connect_timeout is not None: + return + connect_timeout = self._config_store.get_config_variable( + 'connect_timeout' + ) + if connect_timeout: + config_kwargs['connect_timeout'] = connect_timeout + + def _compute_request_compression_config(self, config_kwargs): + min_size = config_kwargs.get('request_min_compression_size_bytes') + disabled = config_kwargs.get('disable_request_compression') + if min_size is None: + min_size = self._config_store.get_config_variable( + 'request_min_compression_size_bytes' + ) + # conversion func is skipped so input validation must be done here + # regardless if the value is coming from the config store or the + # config object + min_size = self._validate_min_compression_size(min_size) + config_kwargs['request_min_compression_size_bytes'] = min_size + + if disabled is None: + disabled = self._config_store.get_config_variable( + 'disable_request_compression' + ) + else: + # if the user provided a value we must check if it's a boolean + disabled = ensure_boolean(disabled) + config_kwargs['disable_request_compression'] = disabled + + def _validate_min_compression_size(self, min_size): + min_allowed_min_size = 1 + max_allowed_min_size = 1048576 + error_msg_base = ( + f'Invalid value "{min_size}" for ' + 'request_min_compression_size_bytes.' + ) + try: + min_size = int(min_size) + except (ValueError, TypeError): + msg = ( + f'{error_msg_base} Value must be an integer. ' + f'Received {type(min_size)} instead.' + ) + raise botocore.exceptions.InvalidConfigError(error_msg=msg) + if not min_allowed_min_size <= min_size <= max_allowed_min_size: + msg = ( + f'{error_msg_base} Value must be between ' + f'{min_allowed_min_size} and {max_allowed_min_size}.' + ) + raise botocore.exceptions.InvalidConfigError(error_msg=msg) + + return min_size + + def _ensure_boolean(self, val): + if isinstance(val, bool): + return val + else: + return val.lower() == 'true' + + def _build_endpoint_resolver( + self, + endpoints_ruleset_data, + partition_data, + client_config, + service_model, + endpoint_region_name, + region_name, + endpoint_url, + endpoint, + is_secure, + endpoint_bridge, + event_emitter, + credentials, + account_id_endpoint_mode, + ): + if endpoints_ruleset_data is None: + return None + + # The legacy EndpointResolver is global to the session, but + # EndpointRulesetResolver is service-specific. Builtins for + # EndpointRulesetResolver must not be derived from the legacy + # endpoint resolver's output, including final_args, s3_config, + # etc. + s3_config_raw = self.compute_s3_config(client_config) or {} + service_name_raw = service_model.endpoint_prefix + # Maintain complex logic for s3 and sts endpoints for backwards + # compatibility. + if service_name_raw in ['s3', 'sts'] or region_name is None: + eprv2_region_name = endpoint_region_name + else: + eprv2_region_name = region_name + resolver_builtins = self.compute_endpoint_resolver_builtin_defaults( + region_name=eprv2_region_name, + service_name=service_name_raw, + s3_config=s3_config_raw, + endpoint_bridge=endpoint_bridge, + client_endpoint_url=endpoint_url, + legacy_endpoint_url=endpoint.host, + credentials=credentials, + account_id_endpoint_mode=account_id_endpoint_mode, + ) + # Client context params for s3 conflict with the available settings + # in the `s3` parameter on the `Config` object. If the same parameter + # is set in both places, the value in the `s3` parameter takes priority. 
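+        # e.g. (hypothetical values) if client_context_params and the `s3`
+        # config both set 'use_arn_region', the dict update below means the
+        # `s3` value is the one the ruleset resolver sees.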
+ if client_config is not None: + client_context = client_config.client_context_params or {} + else: + client_context = {} + if self._is_s3_service(service_name_raw): + client_context.update(s3_config_raw) + + sig_version = ( + client_config.signature_version + if client_config is not None + else None + ) + return EndpointRulesetResolver( + endpoint_ruleset_data=endpoints_ruleset_data, + partition_data=partition_data, + service_model=service_model, + builtins=resolver_builtins, + client_context=client_context, + event_emitter=event_emitter, + use_ssl=is_secure, + requested_auth_scheme=sig_version, + ) + + def compute_endpoint_resolver_builtin_defaults( + self, + region_name, + service_name, + s3_config, + endpoint_bridge, + client_endpoint_url, + legacy_endpoint_url, + credentials, + account_id_endpoint_mode, + ): + # EndpointRulesetResolver rulesets may accept an "SDK::Endpoint" as + # input. If the endpoint_url argument of create_client() is set, it + # always takes priority. + if client_endpoint_url: + given_endpoint = client_endpoint_url + # If an endpoints.json data file other than the one bundled within + # the botocore/data directory is used, the output of legacy + # endpoint resolution is provided to EndpointRulesetResolver. + elif not endpoint_bridge.resolver_uses_builtin_data(): + given_endpoint = legacy_endpoint_url + else: + given_endpoint = None + + # The endpoint rulesets differ from legacy botocore behavior in whether + # forcing path style addressing in incompatible situations raises an + # exception or silently ignores the config setting. The + # AWS_S3_FORCE_PATH_STYLE parameter is adjusted both here and for each + # operation so that the ruleset behavior is backwards compatible. + if s3_config.get('use_accelerate_endpoint', False): + force_path_style = False + elif client_endpoint_url is not None and not is_s3_accelerate_url( + client_endpoint_url + ): + force_path_style = s3_config.get('addressing_style') != 'virtual' + else: + force_path_style = s3_config.get('addressing_style') == 'path' + + return { + EPRBuiltins.AWS_REGION: region_name, + EPRBuiltins.AWS_USE_FIPS: ( + # SDK_ENDPOINT cannot be combined with AWS_USE_FIPS + given_endpoint is None + # use legacy resolver's _resolve_endpoint_variant_config_var() + # or default to False if it returns None + and endpoint_bridge._resolve_endpoint_variant_config_var( + 'use_fips_endpoint' + ) + or False + ), + EPRBuiltins.AWS_USE_DUALSTACK: ( + # SDK_ENDPOINT cannot be combined with AWS_USE_DUALSTACK + given_endpoint is None + # use legacy resolver's _resolve_use_dualstack_endpoint() and + # or default to False if it returns None + and endpoint_bridge._resolve_use_dualstack_endpoint( + service_name + ) + or False + ), + EPRBuiltins.AWS_STS_USE_GLOBAL_ENDPOINT: ( + self._should_set_global_sts_endpoint( + region_name=region_name, + endpoint_url=None, + endpoint_config=None, + ) + ), + EPRBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT: ( + self._should_force_s3_global(region_name, s3_config) + ), + EPRBuiltins.AWS_S3_ACCELERATE: s3_config.get( + 'use_accelerate_endpoint', False + ), + EPRBuiltins.AWS_S3_FORCE_PATH_STYLE: force_path_style, + EPRBuiltins.AWS_S3_USE_ARN_REGION: s3_config.get( + 'use_arn_region', True + ), + EPRBuiltins.AWS_S3CONTROL_USE_ARN_REGION: s3_config.get( + 'use_arn_region', False + ), + EPRBuiltins.AWS_S3_DISABLE_MRAP: s3_config.get( + 's3_disable_multiregion_access_points', False + ), + EPRBuiltins.SDK_ENDPOINT: given_endpoint, + EPRBuiltins.ACCOUNT_ID: credentials.get_deferred_property( + 'account_id' + ) + if 
credentials + else None, + EPRBuiltins.ACCOUNT_ID_ENDPOINT_MODE: account_id_endpoint_mode, + } + + def _compute_user_agent_appid_config(self, config_kwargs): + user_agent_appid = config_kwargs.get('user_agent_appid') + if user_agent_appid is None: + user_agent_appid = self._config_store.get_config_variable( + 'user_agent_appid' + ) + if ( + user_agent_appid is not None + and len(user_agent_appid) > USERAGENT_APPID_MAXLEN + ): + logger.warning( + 'The configured value for user_agent_appid exceeds the ' + 'maximum length of %d characters.', + USERAGENT_APPID_MAXLEN, + ) + config_kwargs['user_agent_appid'] = user_agent_appid + + def _compute_sigv4a_signing_region_set_config(self, config_kwargs): + sigv4a_signing_region_set = config_kwargs.get( + 'sigv4a_signing_region_set' + ) + if sigv4a_signing_region_set is None: + sigv4a_signing_region_set = self._config_store.get_config_variable( + 'sigv4a_signing_region_set' + ) + config_kwargs['sigv4a_signing_region_set'] = sigv4a_signing_region_set + + def _compute_checksum_config(self, config_kwargs): + self._handle_checksum_config( + config_kwargs, + config_key="request_checksum_calculation", + valid_options=VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG, + ) + self._handle_checksum_config( + config_kwargs, + config_key="response_checksum_validation", + valid_options=VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG, + ) + + def _handle_checksum_config( + self, + config_kwargs, + config_key, + valid_options, + ): + value = config_kwargs.get(config_key) + if value is None: + value = self._config_store.get_config_variable(config_key) + + if isinstance(value, str): + value = value.lower() + + if value not in valid_options: + raise botocore.exceptions.InvalidChecksumConfigError( + config_key=config_key, + config_value=value, + valid_options=valid_options, + ) + self._register_checksum_config_feature_ids(value, config_key) + config_kwargs[config_key] = value + + def _register_checksum_config_feature_ids(self, value, config_key): + checksum_config_feature_id = None + if config_key == "request_checksum_calculation": + checksum_config_feature_id = ( + f"FLEXIBLE_CHECKSUMS_REQ_{value.upper()}" + ) + elif config_key == "response_checksum_validation": + checksum_config_feature_id = ( + f"FLEXIBLE_CHECKSUMS_RES_{value.upper()}" + ) + if checksum_config_feature_id is not None: + register_feature_id(checksum_config_feature_id) + + def _compute_account_id_endpoint_mode_config(self, config_kwargs): + config_key = 'account_id_endpoint_mode' + + # Disable account id based endpoint routing for unsigned requests + # since there are no credentials to resolve. + signature_version = config_kwargs.get('signature_version') + if signature_version is botocore.UNSIGNED: + config_kwargs[config_key] = 'disabled' + return + + account_id_endpoint_mode = config_kwargs.get(config_key) + if account_id_endpoint_mode is None: + account_id_endpoint_mode = self._config_store.get_config_variable( + config_key + ) + + if isinstance(account_id_endpoint_mode, str): + account_id_endpoint_mode = account_id_endpoint_mode.lower() + + if ( + account_id_endpoint_mode + not in VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG + ): + raise botocore.exceptions.InvalidConfigError( + error_msg=f"The configured value '{account_id_endpoint_mode}' for '{config_key}' is " + f"invalid. Valid values are: {VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG}." 
+ ) + + config_kwargs[config_key] = account_id_endpoint_mode + + def _compute_auth_scheme_preference_config( + self, client_config, config_kwargs + ): + config_key = 'auth_scheme_preference' + set_in_config_object = False + + if client_config and client_config.auth_scheme_preference: + value = client_config.auth_scheme_preference + set_in_config_object = True + else: + value = self._config_store.get_config_variable(config_key) + + if value is None: + config_kwargs[config_key] = None + return + + if not isinstance(value, str): + raise botocore.exceptions.InvalidConfigError( + error_msg=( + f"{config_key} must be a comma-delimited string. " + f"Received {type(value)} instead: {value}." + ) + ) + + value = ','.join( + item.replace(' ', '').replace('\t', '') + for item in value.split(',') + if item.strip() + ) + + if set_in_config_object: + value = ClientConfigString(value) + + config_kwargs[config_key] = value + + def _compute_signature_version_config(self, client_config, config_kwargs): + if client_config and client_config.signature_version: + value = client_config.signature_version + if isinstance(value, str): + config_kwargs['signature_version'] = ClientConfigString(value) + + +class ConfigObjectWrapper: + """Base class to mark values set via in-code Config object.""" + + pass + + +class ClientConfigString(str, ConfigObjectWrapper): + def __new__(cls, value=None): + return super().__new__(cls, value) diff --git a/py311/lib/python3.11/site-packages/botocore/auth.py b/py311/lib/python3.11/site-packages/botocore/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..7754f210074f7da4de46bb414a0f31df1b7b1117 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/auth.py @@ -0,0 +1,1227 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import base64 +import calendar +import datetime +import functools +import hmac +import json +import logging +import time +from collections.abc import Mapping +from email.utils import formatdate +from hashlib import sha1, sha256 +from operator import itemgetter + +from botocore.compat import ( + HAS_CRT, + MD5_AVAILABLE, # noqa: F401 + HTTPHeaders, + encodebytes, + ensure_unicode, + get_current_datetime, + parse_qs, + quote, + unquote, + urlsplit, + urlunsplit, +) +from botocore.exceptions import ( + NoAuthTokenError, + NoCredentialsError, + UnknownSignatureVersionError, + UnsupportedSignatureVersionError, +) +from botocore.utils import ( + is_valid_ipv6_endpoint_url, + normalize_url_path, + percent_encode_sequence, +) + +logger = logging.getLogger(__name__) + + +EMPTY_SHA256_HASH = ( + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +) +# This is the buffer size used when calculating sha256 checksums. +# Experimenting with various buffer sizes showed that this value generally +# gave the best result (in terms of performance). 
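+# In other words, request bodies are hashed in 1 MiB chunks, roughly:
+#     for chunk in iter(functools.partial(body.read, PAYLOAD_BUFFER), b''):
+#         checksum.update(chunk)
+# (see SigV4Auth.payload below).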
+PAYLOAD_BUFFER = 1024 * 1024 +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' +SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ' +SIGNED_HEADERS_BLACKLIST = [ + 'expect', + 'transfer-encoding', + 'user-agent', + 'x-amzn-trace-id', +] +UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD' +STREAMING_UNSIGNED_PAYLOAD_TRAILER = 'STREAMING-UNSIGNED-PAYLOAD-TRAILER' + + +def _host_from_url(url): + # Given URL, derive value for host header. Ensure that value: + # 1) is lowercase + # 2) excludes port, if it was the default port + # 3) excludes userinfo + url_parts = urlsplit(url) + host = url_parts.hostname # urlsplit's hostname is always lowercase + if is_valid_ipv6_endpoint_url(url): + host = f'[{host}]' + default_ports = { + 'http': 80, + 'https': 443, + } + if url_parts.port is not None: + if url_parts.port != default_ports.get(url_parts.scheme): + host = f'{host}:{url_parts.port}' + return host + + +def _get_body_as_dict(request): + # For query services, request.data is form-encoded and is already a + # dict, but for other services such as rest-json it could be a json + # string or bytes. In those cases we attempt to load the data as a + # dict. + data = request.data + if isinstance(data, bytes): + data = json.loads(data.decode('utf-8')) + elif isinstance(data, str): + data = json.loads(data) + return data + + +class BaseSigner: + REQUIRES_REGION = False + REQUIRES_TOKEN = False + + def add_auth(self, request): + raise NotImplementedError("add_auth") + + +class TokenSigner(BaseSigner): + REQUIRES_TOKEN = True + """ + Signers that expect an authorization token to perform the authorization + """ + + def __init__(self, auth_token): + self.auth_token = auth_token + + +class SigV2Auth(BaseSigner): + """ + Sign a request with Signature V2. + """ + + def __init__(self, credentials): + self.credentials = credentials + + def calc_signature(self, request, params): + logger.debug("Calculating signature using v2 auth.") + split = urlsplit(request.url) + path = split.path + if len(path) == 0: + path = '/' + string_to_sign = f"{request.method}\n{split.netloc}\n{path}\n" + lhmac = hmac.new( + self.credentials.secret_key.encode("utf-8"), digestmod=sha256 + ) + pairs = [] + for key in sorted(params): + # Any previous signature should not be a part of this + # one, so we skip that particular key. This prevents + # issues during retries. + if key == 'Signature': + continue + value = str(params[key]) + quoted_key = quote(key.encode('utf-8'), safe='') + quoted_value = quote(value.encode('utf-8'), safe='-_~') + pairs.append(f'{quoted_key}={quoted_value}') + qs = '&'.join(pairs) + string_to_sign += qs + logger.debug('String to sign: %s', string_to_sign) + lhmac.update(string_to_sign.encode('utf-8')) + b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8') + return (qs, b64) + + def add_auth(self, request): + # The auth handler is the last thing called in the + # preparation phase of a prepared request. + # Because of this we have to parse the query params + # from the request body so we can update them with + # the sigv2 auth params. 
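+        # The net effect: in addition to the operation's own parameters,
+        # the signed request carries roughly
+        #     AWSAccessKeyId, SignatureVersion=2, SignatureMethod=HmacSHA256,
+        #     Timestamp, [SecurityToken] and, last of all, Signature.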
+        if self.credentials is None:
+            raise NoCredentialsError()
+        if request.data:
+            # POST
+            params = request.data
+        else:
+            # GET
+            params = request.params
+        params['AWSAccessKeyId'] = self.credentials.access_key
+        params['SignatureVersion'] = '2'
+        params['SignatureMethod'] = 'HmacSHA256'
+        params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
+        if self.credentials.token:
+            params['SecurityToken'] = self.credentials.token
+        qs, signature = self.calc_signature(request, params)
+        params['Signature'] = signature
+        return request
+
+
+class SigV3Auth(BaseSigner):
+    def __init__(self, credentials):
+        self.credentials = credentials
+
+    def add_auth(self, request):
+        if self.credentials is None:
+            raise NoCredentialsError()
+        if 'Date' in request.headers:
+            del request.headers['Date']
+        request.headers['Date'] = formatdate(usegmt=True)
+        if self.credentials.token:
+            if 'X-Amz-Security-Token' in request.headers:
+                del request.headers['X-Amz-Security-Token']
+            request.headers['X-Amz-Security-Token'] = self.credentials.token
+        new_hmac = hmac.new(
+            self.credentials.secret_key.encode('utf-8'), digestmod=sha256
+        )
+        new_hmac.update(request.headers['Date'].encode('utf-8'))
+        encoded_signature = encodebytes(new_hmac.digest()).strip()
+        signature = (
+            f"AWS3-HTTPS AWSAccessKeyId={self.credentials.access_key},"
+            f"Algorithm=HmacSHA256,Signature={encoded_signature.decode('utf-8')}"
+        )
+        if 'X-Amzn-Authorization' in request.headers:
+            del request.headers['X-Amzn-Authorization']
+        request.headers['X-Amzn-Authorization'] = signature
+
+
+class SigV4Auth(BaseSigner):
+    """
+    Sign a request with Signature V4.
+    """
+
+    REQUIRES_REGION = True
+
+    def __init__(self, credentials, service_name, region_name):
+        self.credentials = credentials
+        # We initialize these values here so the unit tests can have
+        # valid values. But these will get overridden in ``add_auth``
+        # later for real requests.
+        self._region_name = region_name
+        self._service_name = service_name
+
+    def _sign(self, key, msg, hex=False):
+        if hex:
+            sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
+        else:
+            sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
+        return sig
+
+    def headers_to_sign(self, request):
+        """
+        Select the headers from the request that need to be included
+        in the StringToSign.
+        """
+        header_map = HTTPHeaders()
+        for name, value in request.headers.items():
+            lname = name.lower()
+            if lname not in SIGNED_HEADERS_BLACKLIST:
+                header_map[lname] = value
+        if 'host' not in header_map:
+            # TODO: We should set the host ourselves, instead of relying on our
+            # HTTP client to set it for us.
+            header_map['host'] = _host_from_url(request.url)
+        return header_map
+
+    def canonical_query_string(self, request):
+        # The query string can come from two parts. One is the
+        # params attribute of the request. The other is from the request
+        # url (in which case we have to re-split the url into its components
+        # and parse out the query string component).
+        if request.params:
+            return self._canonical_query_string_params(request.params)
+        else:
+            return self._canonical_query_string_url(urlsplit(request.url))
+
+    def _canonical_query_string_params(self, params):
+        # [(key, value), (key2, value2)]
+        key_val_pairs = []
+        if isinstance(params, Mapping):
+            params = params.items()
+        for key, value in params:
+            key_val_pairs.append(
+                (quote(key, safe='-_.~'), quote(str(value), safe='-_.~'))
+            )
+        sorted_key_vals = []
+        # Sort by the URI-encoded key names, and in the case of
+        # repeated keys, then sort by the value.
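+        # e.g. [('b', '2'), ('a', '1'), ('a', '0')] canonicalizes to
+        # 'a=0&a=1&b=2'.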
+ for key, value in sorted(key_val_pairs): + sorted_key_vals.append(f'{key}={value}') + canonical_query_string = '&'.join(sorted_key_vals) + return canonical_query_string + + def _canonical_query_string_url(self, parts): + canonical_query_string = '' + if parts.query: + # [(key, value), (key2, value2)] + key_val_pairs = [] + for pair in parts.query.split('&'): + key, _, value = pair.partition('=') + key_val_pairs.append((key, value)) + sorted_key_vals = [] + # Sort by the URI-encoded key names, and in the case of + # repeated keys, then sort by the value. + for key, value in sorted(key_val_pairs): + sorted_key_vals.append(f'{key}={value}') + canonical_query_string = '&'.join(sorted_key_vals) + return canonical_query_string + + def canonical_headers(self, headers_to_sign): + """ + Return the headers that need to be included in the StringToSign + in their canonical form by converting all header keys to lower + case, sorting them in alphabetical order and then joining + them into a string, separated by newlines. + """ + headers = [] + sorted_header_names = sorted(set(headers_to_sign)) + for key in sorted_header_names: + value = ','.join( + self._header_value(v) for v in headers_to_sign.get_all(key) + ) + headers.append(f'{key}:{ensure_unicode(value)}') + return '\n'.join(headers) + + def _header_value(self, value): + # From the sigv4 docs: + # Lowercase(HeaderName) + ':' + Trimall(HeaderValue) + # + # The Trimall function removes excess white space before and after + # values, and converts sequential spaces to a single space. + return ' '.join(value.split()) + + def signed_headers(self, headers_to_sign): + headers = sorted(n.lower().strip() for n in set(headers_to_sign)) + return ';'.join(headers) + + def _is_streaming_checksum_payload(self, request): + checksum_context = request.context.get('checksum', {}) + algorithm = checksum_context.get('request_algorithm') + return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer' + + def payload(self, request): + if self._is_streaming_checksum_payload(request): + return STREAMING_UNSIGNED_PAYLOAD_TRAILER + elif not self._should_sha256_sign_payload(request): + # When payload signing is disabled, we use this static string in + # place of the payload checksum. + return UNSIGNED_PAYLOAD + request_body = request.body + if request_body and hasattr(request_body, 'seek'): + position = request_body.tell() + read_chunksize = functools.partial( + request_body.read, PAYLOAD_BUFFER + ) + checksum = sha256() + for chunk in iter(read_chunksize, b''): + checksum.update(chunk) + hex_checksum = checksum.hexdigest() + request_body.seek(position) + return hex_checksum + elif request_body: + # The request serialization has ensured that + # request.body is a bytes() type. + return sha256(request_body).hexdigest() + else: + return EMPTY_SHA256_HASH + + def _should_sha256_sign_payload(self, request): + # Payloads will always be signed over insecure connections. + if not request.url.startswith('https'): + return True + + # Certain operations may have payload signing disabled by default. + # Since we don't have access to the operation model, we pass in this + # bit of metadata through the request context. 
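+        # e.g. a customization can set
+        #     request.context['payload_signing_enabled'] = False
+        # and payload() above will return UNSIGNED_PAYLOAD instead of
+        # hashing the body (over https only, per the check above).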
+ return request.context.get('payload_signing_enabled', True) + + def canonical_request(self, request): + cr = [request.method.upper()] + path = self._normalize_url_path(urlsplit(request.url).path) + cr.append(path) + cr.append(self.canonical_query_string(request)) + headers_to_sign = self.headers_to_sign(request) + cr.append(self.canonical_headers(headers_to_sign) + '\n') + cr.append(self.signed_headers(headers_to_sign)) + if 'X-Amz-Content-SHA256' in request.headers: + body_checksum = request.headers['X-Amz-Content-SHA256'] + else: + body_checksum = self.payload(request) + cr.append(body_checksum) + return '\n'.join(cr) + + def _normalize_url_path(self, path): + normalized_path = quote(normalize_url_path(path), safe='/~') + return normalized_path + + def scope(self, request): + scope = [self.credentials.access_key] + scope.append(request.context['timestamp'][0:8]) + scope.append(self._region_name) + scope.append(self._service_name) + scope.append('aws4_request') + return '/'.join(scope) + + def credential_scope(self, request): + scope = [] + scope.append(request.context['timestamp'][0:8]) + scope.append(self._region_name) + scope.append(self._service_name) + scope.append('aws4_request') + return '/'.join(scope) + + def string_to_sign(self, request, canonical_request): + """ + Return the canonical StringToSign as well as a dict + containing the original version of all headers that + were included in the StringToSign. + """ + sts = ['AWS4-HMAC-SHA256'] + sts.append(request.context['timestamp']) + sts.append(self.credential_scope(request)) + sts.append(sha256(canonical_request.encode('utf-8')).hexdigest()) + return '\n'.join(sts) + + def signature(self, string_to_sign, request): + key = self.credentials.secret_key + k_date = self._sign( + (f"AWS4{key}").encode(), request.context["timestamp"][0:8] + ) + k_region = self._sign(k_date, self._region_name) + k_service = self._sign(k_region, self._service_name) + k_signing = self._sign(k_service, 'aws4_request') + return self._sign(k_signing, string_to_sign, hex=True) + + def add_auth(self, request): + if self.credentials is None: + raise NoCredentialsError() + datetime_now = get_current_datetime() + request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP) + # This could be a retry. Make sure the previous + # authorization header is removed first. 
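+        # The end result (assembled in _inject_signature_to_request below)
+        # is an Authorization header of the familiar SigV4 shape, e.g.:
+        #     AWS4-HMAC-SHA256 Credential=<akid>/<date>/<region>/<service>/aws4_request,
+        #     SignedHeaders=host;x-amz-date, Signature=<hex digest>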
+ self._modify_request_before_signing(request) + canonical_request = self.canonical_request(request) + logger.debug("Calculating signature using v4 auth.") + logger.debug('CanonicalRequest:\n%s', canonical_request) + string_to_sign = self.string_to_sign(request, canonical_request) + logger.debug('StringToSign:\n%s', string_to_sign) + signature = self.signature(string_to_sign, request) + logger.debug('Signature:\n%s', signature) + + self._inject_signature_to_request(request, signature) + + def _inject_signature_to_request(self, request, signature): + auth_str = [f'AWS4-HMAC-SHA256 Credential={self.scope(request)}'] + headers_to_sign = self.headers_to_sign(request) + auth_str.append( + f"SignedHeaders={self.signed_headers(headers_to_sign)}" + ) + auth_str.append(f'Signature={signature}') + request.headers['Authorization'] = ', '.join(auth_str) + return request + + def _modify_request_before_signing(self, request): + if 'Authorization' in request.headers: + del request.headers['Authorization'] + self._set_necessary_date_headers(request) + if self.credentials.token: + if 'X-Amz-Security-Token' in request.headers: + del request.headers['X-Amz-Security-Token'] + request.headers['X-Amz-Security-Token'] = self.credentials.token + + if not request.context.get('payload_signing_enabled', True): + if 'X-Amz-Content-SHA256' in request.headers: + del request.headers['X-Amz-Content-SHA256'] + request.headers['X-Amz-Content-SHA256'] = UNSIGNED_PAYLOAD + + def _set_necessary_date_headers(self, request): + # The spec allows for either the Date _or_ the X-Amz-Date value to be + # used so we check both. If there's a Date header, we use the date + # header. Otherwise we use the X-Amz-Date header. + if 'Date' in request.headers: + del request.headers['Date'] + datetime_timestamp = datetime.datetime.strptime( + request.context['timestamp'], SIGV4_TIMESTAMP + ) + request.headers['Date'] = formatdate( + int(calendar.timegm(datetime_timestamp.timetuple())) + ) + if 'X-Amz-Date' in request.headers: + del request.headers['X-Amz-Date'] + else: + if 'X-Amz-Date' in request.headers: + del request.headers['X-Amz-Date'] + request.headers['X-Amz-Date'] = request.context['timestamp'] + + +class S3SigV4Auth(SigV4Auth): + def _modify_request_before_signing(self, request): + super()._modify_request_before_signing(request) + if 'X-Amz-Content-SHA256' in request.headers: + del request.headers['X-Amz-Content-SHA256'] + + request.headers['X-Amz-Content-SHA256'] = self.payload(request) + + def _should_sha256_sign_payload(self, request): + # S3 allows optional body signing, so to minimize the performance + # impact, we opt to not SHA256 sign the body on streaming uploads, + # provided that we're on https. + client_config = request.context.get('client_config') + s3_config = getattr(client_config, 's3', None) + + # The config could be None if it isn't set, or if the customer sets it + # to None. + if s3_config is None: + s3_config = {} + + # The explicit configuration takes precedence over any implicit + # configuration. + sign_payload = s3_config.get('payload_signing_enabled', None) + if sign_payload is not None: + return sign_payload + + # We require that both a checksum be present and https be enabled + # to implicitly disable body signing. The combination of TLS and + # a checksum is sufficiently secure and durable for us to be + # confident in the request without body signing. 
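+        # Concretely: a streaming upload over https that carries a
+        # Content-MD5 (or flexible checksum) header skips SHA-256 body
+        # signing, while anything sent over plain http is always signed.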
+ checksum_header = 'Content-MD5' + checksum_context = request.context.get('checksum', {}) + algorithm = checksum_context.get('request_algorithm') + if isinstance(algorithm, dict) and algorithm.get('in') == 'header': + checksum_header = algorithm['name'] + if ( + not request.url.startswith("https") + or checksum_header not in request.headers + ): + return True + + # If the input is streaming we disable body signing by default. + if request.context.get('has_streaming_input', False): + return False + + # If the S3-specific checks had no results, delegate to the generic + # checks. + return super()._should_sha256_sign_payload(request) + + def _normalize_url_path(self, path): + # For S3, we do not normalize the path. + return path + + +class S3ExpressAuth(S3SigV4Auth): + REQUIRES_IDENTITY_CACHE = True + + def __init__( + self, credentials, service_name, region_name, *, identity_cache + ): + super().__init__(credentials, service_name, region_name) + self._identity_cache = identity_cache + + def add_auth(self, request): + super().add_auth(request) + + def _modify_request_before_signing(self, request): + super()._modify_request_before_signing(request) + if 'x-amz-s3session-token' not in request.headers: + request.headers['x-amz-s3session-token'] = self.credentials.token + # S3Express does not support STS' X-Amz-Security-Token + if 'X-Amz-Security-Token' in request.headers: + del request.headers['X-Amz-Security-Token'] + + +class S3ExpressPostAuth(S3ExpressAuth): + REQUIRES_IDENTITY_CACHE = True + + def add_auth(self, request): + datetime_now = get_current_datetime() + request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP) + + fields = {} + if request.context.get('s3-presign-post-fields', None) is not None: + fields = request.context['s3-presign-post-fields'] + + policy = {} + conditions = [] + if request.context.get('s3-presign-post-policy', None) is not None: + policy = request.context['s3-presign-post-policy'] + if policy.get('conditions', None) is not None: + conditions = policy['conditions'] + + policy['conditions'] = conditions + + fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256' + fields['x-amz-credential'] = self.scope(request) + fields['x-amz-date'] = request.context['timestamp'] + + conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'}) + conditions.append({'x-amz-credential': self.scope(request)}) + conditions.append({'x-amz-date': request.context['timestamp']}) + + if self.credentials.token is not None: + fields['X-Amz-S3session-Token'] = self.credentials.token + conditions.append( + {'X-Amz-S3session-Token': self.credentials.token} + ) + + # Dump the base64 encoded policy into the fields dictionary. + fields['policy'] = base64.b64encode( + json.dumps(policy).encode('utf-8') + ).decode('utf-8') + + fields['x-amz-signature'] = self.signature(fields['policy'], request) + + request.context['s3-presign-post-fields'] = fields + request.context['s3-presign-post-policy'] = policy + + +class S3ExpressQueryAuth(S3ExpressAuth): + DEFAULT_EXPIRES = 300 + REQUIRES_IDENTITY_CACHE = True + + def __init__( + self, + credentials, + service_name, + region_name, + *, + identity_cache, + expires=DEFAULT_EXPIRES, + ): + super().__init__( + credentials, + service_name, + region_name, + identity_cache=identity_cache, + ) + self._expires = expires + + def _modify_request_before_signing(self, request): + # We automatically set this header, so if it's the auto-set value we + # want to get rid of it since it doesn't make sense for presigned urls. 
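+        # Note: unlike plain SigV4 presigning (SigV4QueryAuth below), the
+        # session token is carried as 'X-Amz-S3session-Token' rather than
+        # 'X-Amz-Security-Token'.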
+        content_type = request.headers.get('content-type')
+        blocklisted_content_type = (
+            'application/x-www-form-urlencoded; charset=utf-8'
+        )
+        if content_type == blocklisted_content_type:
+            del request.headers['content-type']
+
+        # Note that we're not including X-Amz-Signature.
+        # From the docs: "The Canonical Query String must include all the query
+        # parameters from the preceding table except for X-Amz-Signature."
+        signed_headers = self.signed_headers(self.headers_to_sign(request))
+
+        auth_params = {
+            'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+            'X-Amz-Credential': self.scope(request),
+            'X-Amz-Date': request.context['timestamp'],
+            'X-Amz-Expires': self._expires,
+            'X-Amz-SignedHeaders': signed_headers,
+        }
+        if self.credentials.token is not None:
+            auth_params['X-Amz-S3session-Token'] = self.credentials.token
+        # Now parse the original query string to a dict, inject our new query
+        # params, and serialize back to a query string.
+        url_parts = urlsplit(request.url)
+        # parse_qs makes each value a list, but in our case we know we won't
+        # have repeated keys so we know we have single element lists which we
+        # can convert back to scalar values.
+        query_string_parts = parse_qs(url_parts.query, keep_blank_values=True)
+        query_dict = {k: v[0] for k, v in query_string_parts.items()}
+
+        if request.params:
+            query_dict.update(request.params)
+            request.params = {}
+        # The spec is particular about this. It *has* to be:
+        # https://<endpoint>?<operation params>&<auth params>
+        # You can't mix the two types of params together, i.e. just keep doing
+        # new_query_params.update(op_params)
+        # new_query_params.update(auth_params)
+        # percent_encode_sequence(new_query_params)
+        operation_params = ''
+        if request.data:
+            # We also need to move the body params into the query string. To
+            # do this, we first have to convert it to a dict.
+            query_dict.update(_get_body_as_dict(request))
+            request.data = ''
+        if query_dict:
+            operation_params = percent_encode_sequence(query_dict) + '&'
+        new_query_string = (
+            f"{operation_params}{percent_encode_sequence(auth_params)}"
+        )
+        # url_parts is a tuple (and therefore immutable) so we need to create
+        # a new url_parts with the new query string.
+        # <part>   - <index>
+        # scheme   - 0
+        # netloc   - 1
+        # path     - 2
+        # query    - 3  <-- we're replacing this.
+        # fragment - 4
+        p = url_parts
+        new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
+        request.url = urlunsplit(new_url_parts)
+
+    def _inject_signature_to_request(self, request, signature):
+        # Rather than calculating an "Authorization" header, for the query
+        # param auth, we just append an 'X-Amz-Signature' param to the end
+        # of the query string.
+        request.url += f'&X-Amz-Signature={signature}'
+
+    def _normalize_url_path(self, path):
+        # For S3, we do not normalize the path.
+        return path
+
+    def payload(self, request):
+        # From the doc link above:
+        # "You don't include a payload hash in the Canonical Request, because
+        # when you create a presigned URL, you don't know anything about the
+        # payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
+        return UNSIGNED_PAYLOAD
+
+
+class SigV4QueryAuth(SigV4Auth):
+    DEFAULT_EXPIRES = 3600
+
+    def __init__(
+        self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES
+    ):
+        super().__init__(credentials, service_name, region_name)
+        self._expires = expires
+
+    def _modify_request_before_signing(self, request):
+        # We automatically set this header, so if it's the auto-set value we
+        # want to get rid of it since it doesn't make sense for presigned urls.
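+        # The finished presigned URL (after _inject_signature_to_request)
+        # looks roughly like:
+        #     https://<endpoint>/<path>?<operation params>
+        #         &X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=...
+        #         &X-Amz-Date=...&X-Amz-Expires=3600&X-Amz-SignedHeaders=host
+        #         &X-Amz-Signature=...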
+ content_type = request.headers.get('content-type')
+ blacklisted_content_type = (
+ 'application/x-www-form-urlencoded; charset=utf-8'
+ )
+ if content_type == blacklisted_content_type:
+ del request.headers['content-type']
+
+ # Note that we're not including X-Amz-Signature.
+ # From the docs: "The Canonical Query String must include all the query
+ # parameters from the preceding table except for X-Amz-Signature."
+ signed_headers = self.signed_headers(self.headers_to_sign(request))
+
+ auth_params = {
+ 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+ 'X-Amz-Credential': self.scope(request),
+ 'X-Amz-Date': request.context['timestamp'],
+ 'X-Amz-Expires': self._expires,
+ 'X-Amz-SignedHeaders': signed_headers,
+ }
+ if self.credentials.token is not None:
+ auth_params['X-Amz-Security-Token'] = self.credentials.token
+ # Now parse the original query string to a dict, inject our new query
+ # params, and serialize back to a query string.
+ url_parts = urlsplit(request.url)
+ # parse_qs makes each value a list, but in our case we know we won't
+ # have repeated keys so we know we have single element lists which we
+ # can convert back to scalar values.
+ query_string_parts = parse_qs(url_parts.query, keep_blank_values=True)
+ query_dict = {k: v[0] for k, v in query_string_parts.items()}
+
+ if request.params:
+ query_dict.update(request.params)
+ request.params = {}
+ # The spec is particular about this. It *has* to be:
+ # https://<endpoint>?<operation params>&<auth params>
+ # You can't mix the two types of params together, i.e. just keep doing
+ # new_query_params.update(op_params)
+ # new_query_params.update(auth_params)
+ # percent_encode_sequence(new_query_params)
+ operation_params = ''
+ if request.data:
+ # We also need to move the body params into the query string. To
+ # do this, we first have to convert it to a dict.
+ query_dict.update(_get_body_as_dict(request))
+ request.data = ''
+ if query_dict:
+ operation_params = percent_encode_sequence(query_dict) + '&'
+ new_query_string = (
+ f"{operation_params}{percent_encode_sequence(auth_params)}"
+ )
+ # url_parts is a tuple (and therefore immutable) so we need to create
+ # a new url_parts with the new query string.
+ # <part> - <index>
+ # scheme - 0
+ # netloc - 1
+ # path - 2
+ # query - 3 <-- we're replacing this.
+ # fragment - 4
+ p = url_parts
+ new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
+ request.url = urlunsplit(new_url_parts)
+
+ def _inject_signature_to_request(self, request, signature):
+ # Rather than calculating an "Authorization" header, for the query
+ # param auth, we just append an 'X-Amz-Signature' param to the end
+ # of the query string.
+ request.url += f'&X-Amz-Signature={signature}'
+
+
+class S3SigV4QueryAuth(SigV4QueryAuth):
+ """S3 SigV4 auth using query parameters.
+
+ This signer will sign a request using query parameters and signature
+ version 4, i.e. a "presigned url" signer.
+
+ Based off of:
+
+ http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+
+ """
+
+ def _normalize_url_path(self, path):
+ # For S3, we do not normalize the path.
+ return path
+
+ def payload(self, request):
+ # From the doc link above:
+ # "You don't include a payload hash in the Canonical Request, because
+ # when you create a presigned URL, you don't know anything about the
+ # payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
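+ # A hedged sketch (values hypothetical, not emitted by this method) of
+ # the final presigned URL shape once the inherited
+ # _inject_signature_to_request appends the signature:
+ #
+ # https://examplebucket.s3.amazonaws.com/test.txt
+ # ?X-Amz-Algorithm=AWS4-HMAC-SHA256
+ # &X-Amz-Credential=<access-key>%2F<date>%2F<region>%2Fs3%2Faws4_request
+ # &X-Amz-Date=<timestamp>&X-Amz-Expires=3600
+ # &X-Amz-SignedHeaders=host&X-Amz-Signature=<signature>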
+ return UNSIGNED_PAYLOAD + + +class S3SigV4PostAuth(SigV4Auth): + """ + Presigns a s3 post + + Implementation doc here: + http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html + """ + + def add_auth(self, request): + datetime_now = get_current_datetime() + request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP) + + fields = {} + if request.context.get('s3-presign-post-fields', None) is not None: + fields = request.context['s3-presign-post-fields'] + + policy = {} + conditions = [] + if request.context.get('s3-presign-post-policy', None) is not None: + policy = request.context['s3-presign-post-policy'] + if policy.get('conditions', None) is not None: + conditions = policy['conditions'] + + policy['conditions'] = conditions + + fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256' + fields['x-amz-credential'] = self.scope(request) + fields['x-amz-date'] = request.context['timestamp'] + + conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'}) + conditions.append({'x-amz-credential': self.scope(request)}) + conditions.append({'x-amz-date': request.context['timestamp']}) + + if self.credentials.token is not None: + fields['x-amz-security-token'] = self.credentials.token + conditions.append({'x-amz-security-token': self.credentials.token}) + + # Dump the base64 encoded policy into the fields dictionary. + fields['policy'] = base64.b64encode( + json.dumps(policy).encode('utf-8') + ).decode('utf-8') + + fields['x-amz-signature'] = self.signature(fields['policy'], request) + + request.context['s3-presign-post-fields'] = fields + request.context['s3-presign-post-policy'] = policy + + +class HmacV1Auth(BaseSigner): + # List of Query String Arguments of Interest + QSAOfInterest = [ + 'accelerate', + 'acl', + 'cors', + 'defaultObjectAcl', + 'location', + 'logging', + 'partNumber', + 'policy', + 'requestPayment', + 'torrent', + 'versioning', + 'versionId', + 'versions', + 'website', + 'uploads', + 'uploadId', + 'response-content-type', + 'response-content-language', + 'response-expires', + 'response-cache-control', + 'response-content-disposition', + 'response-content-encoding', + 'delete', + 'lifecycle', + 'tagging', + 'restore', + 'storageClass', + 'notification', + 'replication', + 'requestPayment', + 'analytics', + 'metrics', + 'inventory', + 'select', + 'select-type', + 'object-lock', + ] + + def __init__(self, credentials, service_name=None, region_name=None): + self.credentials = credentials + + def sign_string(self, string_to_sign): + new_hmac = hmac.new( + self.credentials.secret_key.encode('utf-8'), digestmod=sha1 + ) + new_hmac.update(string_to_sign.encode('utf-8')) + return encodebytes(new_hmac.digest()).strip().decode('utf-8') + + def canonical_standard_headers(self, headers): + interesting_headers = ['content-md5', 'content-type', 'date'] + hoi = [] + if 'Date' in headers: + del headers['Date'] + headers['Date'] = self._get_date() + for ih in interesting_headers: + found = False + for key in headers: + lk = key.lower() + if headers[key] is not None and lk == ih: + hoi.append(headers[key].strip()) + found = True + if not found: + hoi.append('') + return '\n'.join(hoi) + + def canonical_custom_headers(self, headers): + hoi = [] + custom_headers = {} + for key in headers: + lk = key.lower() + if headers[key] is not None: + if lk.startswith('x-amz-'): + custom_headers[lk] = ','.join( + v.strip() for v in headers.get_all(key) + ) + sorted_header_keys = sorted(custom_headers.keys()) + for key in sorted_header_keys: + hoi.append(f"{key}:{custom_headers[key]}") + 
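+ # Hedged example (inputs illustrative, not from botocore's tests):
+ # headers {'X-Amz-Acl': 'public-read', 'X-Amz-Meta-Foo': 'bar'} produce
+ #
+ # x-amz-acl:public-read
+ # x-amz-meta-foo:bar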
return '\n'.join(hoi) + + def unquote_v(self, nv): + """ + TODO: Do we need this? + """ + if len(nv) == 1: + return nv + else: + return (nv[0], unquote(nv[1])) + + def canonical_resource(self, split, auth_path=None): + # don't include anything after the first ? in the resource... + # unless it is one of the QSA of interest, defined above + # NOTE: + # The path in the canonical resource should always be the + # full path including the bucket name, even for virtual-hosting + # style addressing. The ``auth_path`` keeps track of the full + # path for the canonical resource and would be passed in if + # the client was using virtual-hosting style. + if auth_path is not None: + buf = auth_path + else: + buf = split.path + if split.query: + qsa = split.query.split('&') + qsa = [a.split('=', 1) for a in qsa] + qsa = [ + self.unquote_v(a) for a in qsa if a[0] in self.QSAOfInterest + ] + if len(qsa) > 0: + qsa.sort(key=itemgetter(0)) + qsa = ['='.join(a) for a in qsa] + buf += '?' + buf += '&'.join(qsa) + return buf + + def canonical_string( + self, method, split, headers, expires=None, auth_path=None + ): + cs = method.upper() + '\n' + cs += self.canonical_standard_headers(headers) + '\n' + custom_headers = self.canonical_custom_headers(headers) + if custom_headers: + cs += custom_headers + '\n' + cs += self.canonical_resource(split, auth_path=auth_path) + return cs + + def get_signature( + self, method, split, headers, expires=None, auth_path=None + ): + if self.credentials.token: + del headers['x-amz-security-token'] + headers['x-amz-security-token'] = self.credentials.token + string_to_sign = self.canonical_string( + method, split, headers, auth_path=auth_path + ) + logger.debug('StringToSign:\n%s', string_to_sign) + return self.sign_string(string_to_sign) + + def add_auth(self, request): + if self.credentials is None: + raise NoCredentialsError + logger.debug("Calculating signature using hmacv1 auth.") + split = urlsplit(request.url) + logger.debug("HTTP request method: %s", request.method) + signature = self.get_signature( + request.method, split, request.headers, auth_path=request.auth_path + ) + self._inject_signature(request, signature) + + def _get_date(self): + return formatdate(usegmt=True) + + def _inject_signature(self, request, signature): + if 'Authorization' in request.headers: + # We have to do this because request.headers is not + # normal dictionary. It has the (unintuitive) behavior + # of aggregating repeated setattr calls for the same + # key value. For example: + # headers['foo'] = 'a'; headers['foo'] = 'b' + # list(headers) will print ['foo', 'foo']. + del request.headers['Authorization'] + + auth_header = f"AWS {self.credentials.access_key}:{signature}" + request.headers['Authorization'] = auth_header + + +class HmacV1QueryAuth(HmacV1Auth): + """ + Generates a presigned request for s3. + + Spec from this document: + + http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + #RESTAuthenticationQueryStringAuth + + """ + + DEFAULT_EXPIRES = 3600 + + def __init__(self, credentials, expires=DEFAULT_EXPIRES): + self.credentials = credentials + self._expires = expires + + def _get_date(self): + return str(int(time.time() + int(self._expires))) + + def _inject_signature(self, request, signature): + query_dict = {} + query_dict['AWSAccessKeyId'] = self.credentials.access_key + query_dict['Signature'] = signature + + for header_key in request.headers: + lk = header_key.lower() + # For query string requests, Expires is used instead of the + # Date header. 
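+ # Hedged arithmetic example: _get_date() above returns epoch
+ # seconds, so a URL signed at t=1700000000 with the default
+ # expires=3600 carries Expires=1700003600.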
+ if header_key == 'Date': + query_dict['Expires'] = request.headers['Date'] + # We only want to include relevant headers in the query string. + # These can be anything that starts with x-amz, is Content-MD5, + # or is Content-Type. + elif lk.startswith('x-amz-') or lk in ( + 'content-md5', + 'content-type', + ): + query_dict[lk] = request.headers[lk] + # Combine all of the identified headers into an encoded + # query string + new_query_string = percent_encode_sequence(query_dict) + + # Create a new url with the presigned url. + p = urlsplit(request.url) + if p[3]: + # If there was a pre-existing query string, we should + # add that back before injecting the new query string. + new_query_string = f'{p[3]}&{new_query_string}' + new_url_parts = (p[0], p[1], p[2], new_query_string, p[4]) + request.url = urlunsplit(new_url_parts) + + +class HmacV1PostAuth(HmacV1Auth): + """ + Generates a presigned post for s3. + + Spec from this document: + + http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html + """ + + def add_auth(self, request): + fields = {} + if request.context.get('s3-presign-post-fields', None) is not None: + fields = request.context['s3-presign-post-fields'] + + policy = {} + conditions = [] + if request.context.get('s3-presign-post-policy', None) is not None: + policy = request.context['s3-presign-post-policy'] + if policy.get('conditions', None) is not None: + conditions = policy['conditions'] + + policy['conditions'] = conditions + + fields['AWSAccessKeyId'] = self.credentials.access_key + + if self.credentials.token is not None: + fields['x-amz-security-token'] = self.credentials.token + conditions.append({'x-amz-security-token': self.credentials.token}) + + # Dump the base64 encoded policy into the fields dictionary. + fields['policy'] = base64.b64encode( + json.dumps(policy).encode('utf-8') + ).decode('utf-8') + + fields['signature'] = self.sign_string(fields['policy']) + + request.context['s3-presign-post-fields'] = fields + request.context['s3-presign-post-policy'] = policy + + +class BearerAuth(TokenSigner): + """ + Performs bearer token authorization by placing the bearer token in the + Authorization header as specified by Section 2.1 of RFC 6750. 
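+
+ A hedged illustration of the resulting header (token value
+ hypothetical): ``Authorization: Bearer example-token``.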
+ + https://datatracker.ietf.org/doc/html/rfc6750#section-2.1 + """ + + def add_auth(self, request): + if self.auth_token is None: + raise NoAuthTokenError() + + auth_header = f'Bearer {self.auth_token.token}' + if 'Authorization' in request.headers: + del request.headers['Authorization'] + request.headers['Authorization'] = auth_header + + +def resolve_auth_type(auth_trait): + for auth_type in auth_trait: + if auth_type == 'smithy.api#noAuth': + return AUTH_TYPE_TO_SIGNATURE_VERSION[auth_type] + elif auth_type in AUTH_TYPE_TO_SIGNATURE_VERSION: + signature_version = AUTH_TYPE_TO_SIGNATURE_VERSION[auth_type] + if signature_version in AUTH_TYPE_MAPS: + return signature_version + else: + raise UnknownSignatureVersionError(signature_version=auth_type) + raise UnsupportedSignatureVersionError(signature_version=auth_trait) + + +def resolve_auth_scheme_preference(preference_list, auth_options): + service_supported = [scheme.split('#')[-1] for scheme in auth_options] + + unsupported = [ + scheme + for scheme in preference_list + if scheme not in AUTH_PREF_TO_SIGNATURE_VERSION + ] + if unsupported: + logger.debug( + "Unsupported auth schemes in preference list: %r", unsupported + ) + + combined = preference_list + service_supported + prioritized_schemes = [ + scheme + for scheme in dict.fromkeys(combined) + if scheme in service_supported + ] + + for scheme in prioritized_schemes: + if scheme == 'noAuth': + return AUTH_PREF_TO_SIGNATURE_VERSION[scheme] + sig_version = AUTH_PREF_TO_SIGNATURE_VERSION.get(scheme) + if sig_version in AUTH_TYPE_MAPS: + return sig_version + + raise UnsupportedSignatureVersionError( + signature_version=', '.join(sorted(service_supported)) + ) + + +AUTH_TYPE_MAPS = { + 'v2': SigV2Auth, + 'v3': SigV3Auth, + 'v3https': SigV3Auth, + 's3': HmacV1Auth, + 's3-query': HmacV1QueryAuth, + 's3-presign-post': HmacV1PostAuth, + 's3v4-presign-post': S3SigV4PostAuth, + 'v4-s3express': S3ExpressAuth, + 'v4-s3express-query': S3ExpressQueryAuth, + 'v4-s3express-presign-post': S3ExpressPostAuth, + 'bearer': BearerAuth, +} + +# Define v4 signers depending on if CRT is present +if HAS_CRT: + from botocore.crt.auth import CRT_AUTH_TYPE_MAPS + + AUTH_TYPE_MAPS.update(CRT_AUTH_TYPE_MAPS) +else: + AUTH_TYPE_MAPS.update( + { + 'v4': SigV4Auth, + 'v4-query': SigV4QueryAuth, + 's3v4': S3SigV4Auth, + 's3v4-query': S3SigV4QueryAuth, + } + ) + +AUTH_TYPE_TO_SIGNATURE_VERSION = { + 'aws.auth#sigv4': 'v4', + 'aws.auth#sigv4a': 'v4a', + 'smithy.api#httpBearerAuth': 'bearer', + 'smithy.api#noAuth': 'none', +} + +# Mapping used specifically for resolving user-configured auth scheme preferences. +# This is similar to AUTH_TYPE_TO_SIGNATURE_VERSION, but uses simplified keys by +# stripping the auth trait prefixes ('smithy.api#httpBearerAuth' → 'httpBearerAuth'). +# These simplified keys match what customers are expected to provide in configuration. +AUTH_PREF_TO_SIGNATURE_VERSION = { + auth_scheme.split('#')[-1]: sig_version + for auth_scheme, sig_version in AUTH_TYPE_TO_SIGNATURE_VERSION.items() +} diff --git a/py311/lib/python3.11/site-packages/botocore/awsrequest.py b/py311/lib/python3.11/site-packages/botocore/awsrequest.py new file mode 100644 index 0000000000000000000000000000000000000000..06681395a223e950bdce3c010caca2b644fe3fd1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/awsrequest.py @@ -0,0 +1,635 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import functools
+import logging
+from collections.abc import Mapping
+
+import urllib3.util
+from urllib3.connection import HTTPConnection, VerifiedHTTPSConnection
+from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+
+import botocore.utils
+from botocore.compat import (
+ HTTPHeaders,
+ HTTPResponse,
+ MutableMapping,
+ urlencode,
+ urlparse,
+ urlsplit,
+ urlunsplit,
+)
+from botocore.exceptions import UnseekableStreamError
+
+logger = logging.getLogger(__name__)
+
+
+class AWSHTTPResponse(HTTPResponse):
+ # The *args, **kwargs is used because the args are slightly
+ # different in py2.6 than in py2.7/py3.
+ def __init__(self, *args, **kwargs):
+ self._status_tuple = kwargs.pop('status_tuple')
+ HTTPResponse.__init__(self, *args, **kwargs)
+
+ def _read_status(self):
+ if self._status_tuple is not None:
+ status_tuple = self._status_tuple
+ self._status_tuple = None
+ return status_tuple
+ else:
+ return HTTPResponse._read_status(self)
+
+
+class AWSConnection:
+ """Mixin for HTTPConnection that supports Expect 100-continue.
+
+ This class, when mixed with a subclass of httplib.HTTPConnection (though
+ technically we subclass from urllib3, which subclasses
+ httplib.HTTPConnection), overrides just enough behavior to support Expect
+ 100-continue, which we need for S3. As far as I can tell, this is
+ general purpose enough to not be specific to S3, but I'm being
+ tentative and keeping it in botocore because I've only tested
+ this against AWS services.
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._original_response_cls = self.response_class
+ # This variable is set when we receive an early response from the
+ # server. If this value is set to True, any calls to send() are noops.
+ # This value is reset to false every time _send_request is called.
+ # This is to work around changes in urllib3 2.0 which uses separate
+ # send() calls in request() instead of delegating to endheaders(),
+ # which is where the body is sent in CPython's HTTPConnection.
+ self._response_received = False
+ self._expect_header_set = False
+ self._send_called = False
+
+ def close(self):
+ super().close()
+ # Reset all of our instance state we were tracking.
+ self._response_received = False
+ self._expect_header_set = False
+ self._send_called = False
+ self.response_class = self._original_response_cls
+
+ def request(self, method, url, body=None, headers=None, *args, **kwargs):
+ if headers is None:
+ headers = {}
+ self._response_received = False
+ if headers.get('Expect', b'') == b'100-continue':
+ self._expect_header_set = True
+ else:
+ self._expect_header_set = False
+ self.response_class = self._original_response_cls
+ rval = super().request(method, url, body, headers, *args, **kwargs)
+ self._expect_header_set = False
+ return rval
+
+ def _convert_to_bytes(self, mixed_buffer):
+ # Take a list of mixed str/bytes and convert it
+ # all into a single bytestring.
+ # Any str will be encoded as utf-8.
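+ # Hedged example of the conversion below:
+ # ['GET / HTTP/1.1', b'Host: example.com'] becomes
+ # b'GET / HTTP/1.1\r\nHost: example.com'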
+ bytes_buffer = []
+ for chunk in mixed_buffer:
+ if isinstance(chunk, str):
+ bytes_buffer.append(chunk.encode('utf-8'))
+ else:
+ bytes_buffer.append(chunk)
+ msg = b"\r\n".join(bytes_buffer)
+ return msg
+
+ def _send_output(self, message_body=None, *args, **kwargs):
+ self._buffer.extend((b"", b""))
+ msg = self._convert_to_bytes(self._buffer)
+ del self._buffer[:]
+ # If msg and message_body are sent in a single send() call,
+ # it will avoid performance problems caused by the interaction
+ # between delayed ack and the Nagle algorithm.
+ if isinstance(message_body, bytes):
+ msg += message_body
+ message_body = None
+ self.send(msg)
+ if self._expect_header_set:
+ # This is our custom behavior. If the Expect header was
+ # set, it will trigger this custom behavior.
+ logger.debug("Waiting for 100 Continue response.")
+ # Wait for 1 second for the server to send a response.
+ if urllib3.util.wait_for_read(self.sock, 1):
+ self._handle_expect_response(message_body)
+ return
+ else:
+ # From the RFC:
+ # Because of the presence of older implementations, the
+ # protocol allows ambiguous situations in which a client may
+ # send "Expect: 100-continue" without receiving either a 417
+ # (Expectation Failed) status or a 100 (Continue) status.
+ # Therefore, when a client sends this header field to an origin
+ # server (possibly via a proxy) from which it has never seen a
+ # 100 (Continue) status, the client SHOULD NOT wait for an
+ # indefinite period before sending the request body.
+ logger.debug(
+ "No response seen from server, continuing to "
+ "send the request body."
+ )
+ if message_body is not None:
+ # message_body was not a string (i.e. it is a file), and
+ # we must run the risk of Nagle.
+ self.send(message_body)
+
+ def _consume_headers(self, fp):
+ # Most servers (including S3) will just return
+ # the CRLF after the 100 continue response. However,
+ # some servers (I've specifically seen this for squid when
+ # used as a straight HTTP proxy) will also inject a
+ # Connection: keep-alive header. To account for this
+ # we'll read until we read '\r\n', and ignore any headers
+ # that come immediately after the 100 continue response.
+ current = None
+ while current != b'\r\n':
+ current = fp.readline()
+
+ def _handle_expect_response(self, message_body):
+ # This is called when we sent the request headers containing
+ # an Expect: 100-continue header and received a response.
+ # We now need to figure out what to do.
+ fp = self.sock.makefile('rb', 0)
+ try:
+ maybe_status_line = fp.readline()
+ parts = maybe_status_line.split(None, 2)
+ if self._is_100_continue_status(maybe_status_line):
+ self._consume_headers(fp)
+ logger.debug(
+ "100 Continue response seen, now sending request body."
+ )
+ self._send_message_body(message_body)
+ elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):
+ # From the RFC:
+ # Requirements for HTTP/1.1 origin servers:
+ #
+ # - Upon receiving a request which includes an Expect
+ # request-header field with the "100-continue"
+ # expectation, an origin server MUST either respond with
+ # 100 (Continue) status and continue to read from the
+ # input stream, or respond with a final status code.
+ #
+ # So if we don't get a 100 Continue response, then
+ # whatever the server has sent back is the final response
+ # and don't send the message_body.
+ logger.debug(
+ "Received a non 100 Continue response "
+ "from the server, NOT sending request body."
+ )
+ status_tuple = (
+ parts[0].decode('ascii'),
+ int(parts[1]),
+ parts[2].decode('ascii'),
+ )
+ response_class = functools.partial(
+ AWSHTTPResponse, status_tuple=status_tuple
+ )
+ self.response_class = response_class
+ self._response_received = True
+ finally:
+ fp.close()
+
+ def _send_message_body(self, message_body):
+ if message_body is not None:
+ self.send(message_body)
+
+ def send(self, str):
+ if self._response_received:
+ if not self._send_called:
+ # urllib3 2.0 chunks and calls send potentially
+ # thousands of times inside `request` unlike the
+ # standard library. Only log this once for sanity.
+ logger.debug(
+ "send() called, but response already received. "
+ "Not sending data."
+ )
+ self._send_called = True
+ return
+ return super().send(str)
+
+ def _is_100_continue_status(self, maybe_status_line):
+ parts = maybe_status_line.split(None, 2)
+ # Check for HTTP/<version> 100 Continue\r\n or HTTP/<version> 100\r\n
+ return (
+ len(parts) >= 2
+ and parts[0].startswith(b'HTTP/')
+ and parts[1] == b'100'
+ )
+
+
+class AWSHTTPConnection(AWSConnection, HTTPConnection):
+ """An HTTPConnection that supports 100 Continue behavior."""
+
+
+class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection):
+ """An HTTPSConnection that supports 100 Continue behavior."""
+
+
+class AWSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = AWSHTTPConnection
+
+
+class AWSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = AWSHTTPSConnection
+
+
+def prepare_request_dict(
+ request_dict, endpoint_url, context=None, user_agent=None
+):
+ """
+ This method prepares a request dict to be created into an
+ AWSRequest object. This prepares the request dict by adding the
+ url and the user agent to the request dict.
+
+ :type request_dict: dict
+ :param request_dict: The request dict (created from the
+ ``serialize`` module).
+
+ :type user_agent: string
+ :param user_agent: The user agent to use for this request.
+
+ :type endpoint_url: string
+ :param endpoint_url: The full endpoint url, which contains at least
+ the scheme, the hostname, and optionally any path components.
+ """
+ r = request_dict
+ if user_agent is not None:
+ headers = r['headers']
+ headers['User-Agent'] = user_agent
+ host_prefix = r.get('host_prefix')
+ url = _urljoin(endpoint_url, r['url_path'], host_prefix)
+ if r['query_string']:
+ # NOTE: This is to avoid circular import with utils. This is being
+ # done to avoid moving classes to different modules as to not cause
+ # breaking changes.
+ percent_encode_sequence = botocore.utils.percent_encode_sequence
+ encoded_query_string = percent_encode_sequence(r['query_string'])
+ if '?' not in url:
+ url += f'?{encoded_query_string}'
+ else:
+ url += f'&{encoded_query_string}'
+ r['url'] = url
+ r['context'] = context
+ if context is None:
+ r['context'] = {}
+
+
+def create_request_object(request_dict):
+ """
+ This method takes a request dict and creates an AWSRequest object
+ from it.
+
+ :type request_dict: dict
+ :param request_dict: The request dict (created from the
+ ``prepare_request_dict`` method).
+
+ :rtype: ``botocore.awsrequest.AWSRequest``
+ :return: An AWSRequest object based on the request_dict.
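+
+ A hedged usage sketch (keys as documented above, values illustrative):
+
+ request = create_request_object({
+ 'method': 'GET', 'url': 'https://example.com/', 'body': b'',
+ 'headers': {}, 'context': {}, 'auth_path': None,
+ })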
+
+ """
+ r = request_dict
+ request_object = AWSRequest(
+ method=r['method'],
+ url=r['url'],
+ data=r['body'],
+ headers=r['headers'],
+ auth_path=r.get('auth_path'),
+ )
+ request_object.context = r['context']
+ return request_object
+
+
+def _urljoin(endpoint_url, url_path, host_prefix):
+ p = urlsplit(endpoint_url)
+ # <part> - <index>
+ # scheme - p[0]
+ # netloc - p[1]
+ # path - p[2]
+ # query - p[3]
+ # fragment - p[4]
+ if not url_path or url_path == '/':
+ # If there's no path component, ensure the URL ends with
+ # a '/' for backwards compatibility.
+ if not p[2]:
+ new_path = '/'
+ else:
+ new_path = p[2]
+ elif p[2].endswith('/') and url_path.startswith('/'):
+ new_path = p[2][:-1] + url_path
+ else:
+ new_path = p[2] + url_path
+
+ new_netloc = p[1]
+ if host_prefix is not None:
+ new_netloc = host_prefix + new_netloc
+
+ reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4]))
+ return reconstructed
+
+
+class AWSRequestPreparer:
+ """
+ This class performs preparation on AWSRequest objects similar to what
+ the PreparedRequest class does in the requests library. However, the logic
+ has been boiled down to meet the specific use cases in botocore. Of note
+ there are the following differences:
+ This class does not heavily prepare the URL. Requests performed many
+ validations and corrections to ensure the URL is properly formatted.
+ Botocore either performs these validations elsewhere or otherwise
+ consistently provides well formatted URLs.
+
+ This class does not heavily prepare the body. Body preparation is
+ simple and supports only the cases that we document: bytes and
+ file-like objects to determine the content-length. This will also
+ additionally prepare a dict body into a url-encoded params string,
+ as some signers rely on this. Finally, this class does not
+ support multipart file uploads.
+
+ This class does not prepare the method, auth or cookies.
+ """
+
+ def prepare(self, original):
+ method = original.method
+ url = self._prepare_url(original)
+ body = self._prepare_body(original)
+ headers = self._prepare_headers(original, body)
+ stream_output = original.stream_output
+
+ return AWSPreparedRequest(method, url, headers, body, stream_output)
+
+ def _prepare_url(self, original):
+ url = original.url
+ if original.params:
+ url_parts = urlparse(url)
+ delim = '&' if url_parts.query else '?'
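+ # Hedged example: for url 'https://host/path' (no existing query)
+ # and params {'a': '1'}, delim is '?' and the join below yields
+ # 'https://host/path?a=1'.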
+ if isinstance(original.params, Mapping):
+ params_to_encode = list(original.params.items())
+ else:
+ params_to_encode = original.params
+ params = urlencode(params_to_encode, doseq=True)
+ url = delim.join((url, params))
+ return url
+
+ def _prepare_headers(self, original, prepared_body=None):
+ headers = HeadersDict(original.headers.items())
+
+ # If the transfer encoding or content length is already set, use that
+ if 'Transfer-Encoding' in headers or 'Content-Length' in headers:
+ return headers
+
+ # Ensure we set the content length when it is expected
+ if original.method not in ('GET', 'HEAD', 'OPTIONS'):
+ length = self._determine_content_length(prepared_body)
+ if length is not None:
+ headers['Content-Length'] = str(length)
+ else:
+ # Failed to determine content length, using chunked
+ # NOTE: This shouldn't ever happen in practice
+ body_type = type(prepared_body)
+ logger.debug('Failed to determine length of %s', body_type)
+ headers['Transfer-Encoding'] = 'chunked'
+
+ return headers
+
+ def _to_utf8(self, item):
+ key, value = item
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ return key, value
+
+ def _prepare_body(self, original):
+ """Prepares the given HTTP body data."""
+ body = original.data
+ if body == b'':
+ body = None
+
+ if isinstance(body, dict):
+ params = [self._to_utf8(item) for item in body.items()]
+ body = urlencode(params, doseq=True)
+
+ return body
+
+ def _determine_content_length(self, body):
+ return botocore.utils.determine_content_length(body)
+
+
+class AWSRequest:
+ """Represents the elements of an HTTP request.
+
+ This class was originally inspired by requests.models.Request, but has been
+ boiled down to meet the specific use cases in botocore. That being said this
+ class (even in requests) is effectively a named-tuple.
+ """
+
+ _REQUEST_PREPARER_CLS = AWSRequestPreparer
+
+ def __init__(
+ self,
+ method=None,
+ url=None,
+ headers=None,
+ data=None,
+ params=None,
+ auth_path=None,
+ stream_output=False,
+ ):
+ self._request_preparer = self._REQUEST_PREPARER_CLS()
+
+ # Default empty dicts for dict params.
+ params = {} if params is None else params
+
+ self.method = method
+ self.url = url
+ self.headers = HTTPHeaders()
+ self.data = data
+ self.params = params
+ self.auth_path = auth_path
+ self.stream_output = stream_output
+
+ if headers is not None:
+ for key, value in headers.items():
+ self.headers[key] = value
+
+ # This is a dictionary to hold information that is used when
+ # processing the request. What is inside of ``context`` is open-ended.
+ # For example, it may have a timestamp key that is used for holding
+ # what the timestamp is when signing the request. Note that none
+ # of the information that is inside of ``context`` is directly
+ # sent over the wire; the information is only used to assist in
+ # creating what is sent over the wire.
+ self.context = {}
+
+ def prepare(self):
+ """Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
+ return self._request_preparer.prepare(self)
+
+ @property
+ def body(self):
+ body = self.prepare().body
+ if isinstance(body, str):
+ body = body.encode('utf-8')
+ return body
+
+
+class AWSPreparedRequest:
+ """A data class representing a finalized request to be sent over the wire.
+
+ Requests at this stage should be treated as final, and the properties of
+ the request should not be modified.
+
+ :ivar method: The HTTP Method
+ :ivar url: The full url
+ :ivar headers: The HTTP headers to send.
+ :ivar body: The HTTP body.
+ :ivar stream_output: If the response for this request should be streamed.
+ """
+
+ def __init__(self, method, url, headers, body, stream_output):
+ self.method = method
+ self.url = url
+ self.headers = headers
+ self.body = body
+ self.stream_output = stream_output
+
+ def __repr__(self):
+ fmt = (
+ '<AWSPreparedRequest stream_output=%s, method=%s, url=%s, '
+ 'headers=%s>'
+ )
+ return fmt % (self.stream_output, self.method, self.url, self.headers)
+
+ def reset_stream(self):
+ """Resets the streaming body to its initial position.
+
+ If the request contains a streaming body (a streamable file-like
+ object), seek to the object's initial position to ensure the entire
+ contents of the object is sent. This is a no-op for static bytes-like
+ body types.
+ """
+ # Trying to reset a stream when there is no stream will
+ # just immediately return. It's not an error, it will produce
+ # the same result as if we had actually reset the stream (we'll send
+ # the entire body contents again if we need to).
+ # Same case if the body is a string/bytes/bytearray type.
+
+ non_seekable_types = (bytes, str, bytearray)
+ if self.body is None or isinstance(self.body, non_seekable_types):
+ return
+ try:
+ logger.debug("Rewinding stream: %s", self.body)
+ self.body.seek(0)
+ except Exception as e:
+ logger.debug("Unable to rewind stream: %s", e)
+ raise UnseekableStreamError(stream_object=self.body)
+
+
+class AWSResponse:
+ """A data class representing an HTTP response.
+
+ This class was originally inspired by requests.models.Response, but has
+ been boiled down to meet the specific use cases in botocore. This has
+ effectively been reduced to a named tuple.
+
+ :ivar url: The full url.
+ :ivar status_code: The status code of the HTTP response.
+ :ivar headers: The HTTP headers received.
+ :ivar body: The HTTP response body.
+ """
+
+ def __init__(self, url, status_code, headers, raw):
+ self.url = url
+ self.status_code = status_code
+ self.headers = HeadersDict(headers)
+ self.raw = raw
+
+ self._content = None
+
+ @property
+ def content(self):
+ """Content of the response as bytes."""
+
+ if self._content is None:
+ # Read the contents.
+ # NOTE: requests would attempt to call stream and fall back
+ # to a custom generator that would call read in a loop, but
+ # we don't rely on this behavior
+ self._content = b''.join(self.raw.stream()) or b''
+
+ return self._content
+
+ @property
+ def text(self):
+ """Content of the response as a proper text type.
+
+ Uses the encoding type provided in the response headers to decode the
+ response content into a proper text type. If the encoding is not
+ present in the headers, UTF-8 is used as a default.
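+
+ Hedged example: a response with ``Content-Type: text/plain;
+ charset=iso-8859-1`` is decoded as ISO-8859-1, while a response
+ with no content-type header at all falls back to UTF-8.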
+ """ + encoding = botocore.utils.get_encoding_from_headers(self.headers) + if encoding: + return self.content.decode(encoding) + else: + return self.content.decode('utf-8') + + +class _HeaderKey: + def __init__(self, key): + self._key = key + self._lower = key.lower() + + def __hash__(self): + return hash(self._lower) + + def __eq__(self, other): + return isinstance(other, _HeaderKey) and self._lower == other._lower + + def __str__(self): + return self._key + + def __repr__(self): + return repr(self._key) + + +class HeadersDict(MutableMapping): + """A case-insenseitive dictionary to represent HTTP headers.""" + + def __init__(self, *args, **kwargs): + self._dict = {} + self.update(*args, **kwargs) + + def __setitem__(self, key, value): + self._dict[_HeaderKey(key)] = value + + def __getitem__(self, key): + return self._dict[_HeaderKey(key)] + + def __delitem__(self, key): + del self._dict[_HeaderKey(key)] + + def __iter__(self): + return (str(key) for key in self._dict) + + def __len__(self): + return len(self._dict) + + def __repr__(self): + return repr(self._dict) + + def copy(self): + return HeadersDict(self.items()) diff --git a/py311/lib/python3.11/site-packages/botocore/cacert.pem b/py311/lib/python3.11/site-packages/botocore/cacert.pem new file mode 100644 index 0000000000000000000000000000000000000000..919478ed06ae84199e3afc23c22eec96365886c5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/cacert.pem @@ -0,0 +1,4361 @@ + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 
75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz 
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN 
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW +koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Label: "Visa eCommerce Root" +# Serial: 25952180776285836048024890241505565794 +# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02 +# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62 +# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22 +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl 
+cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Label: "QuoVadis Root CA" +# Serial: 985026699 +# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 +# SHA1 Fingerprint: 
de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 +# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz 
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B 
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=Sonera Class2 CA O=Sonera +# Subject: CN=Sonera Class2 CA O=Sonera +# Label: "Sonera Class 2 Root CA" +# Serial: 29 +# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb +# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 +# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ 
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. 
OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: O=Government Root Certification Authority +# Subject: O=Government Root Certification Authority +# Label: "Taiwan GRCA" +# Serial: 42023070807708724159991140556527066870 +# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e +# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 +# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK 
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf +qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 +hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=Class 2 Primary CA O=Certplus +# Subject: CN=Class 2 Primary CA O=Certplus +# Label: "Certplus Class 2 Primary CA" +# Serial: 177770208045934040241468760488327595043 +# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b +# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb +# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz 
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Label: "DST Root CA X3" +# Serial: 91299735575339953335919266965803778155 +# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 +# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 +# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF 
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c 
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Label: "GeoTrust Primary Certification Authority" +# Serial: 32798226551256963324313806436981982369 +# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf +# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 +# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only +# Label: "thawte Primary Root CA" +# Serial: 69529181992039203566298953787712940909 +# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 +# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 +# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. 
- For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" +# Serial: 33037644167568058970164719475676101450 +# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c +# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 +# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg 
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT 
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GA CA" +# Serial: 86718877871133159090080555911823548314 +# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 +# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 +# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Label: "Deutsche Telekom Root CA 2" +# Serial: 38 +# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 +# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf +# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z 
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. 
+# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Chambers of Commerce Root - 2008" +# Serial: 11806822484801597146 +# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 +# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c +# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- + +# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Global Chambersign Root - 2008" +# Serial: 14541511773111788494 +# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 +# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c +# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: O=Trustis Limited OU=Trustis FPS Root CA +# Subject: O=Trustis Limited OU=Trustis FPS Root CA +# Label: "Trustis FPS Root CA" +# Serial: 36053640375399034304724988975563710553 +# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d +# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 +# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr 
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv 
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Label: "EE Certification Centre Root CA" +# Serial: 112324828676200291871926431888494945866 +# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f +# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 +# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw 
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV 
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os 
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 
Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG 
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# 
Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c 
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: 
f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu 
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: 
af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY 
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 14367148294922964480859022125800977897474 +# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e +# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb +# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk 
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G3" +# Serial: 10003001 +# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 +# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc +# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt 
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 
13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw 
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. 
OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT 
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. +# Subject: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. +# Label: "T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 H5" +# Serial: 156233699172481 +# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e +# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb +# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78 +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE +BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn +aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg +QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg +SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0 +MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD +VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 +dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom +/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR +Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3 +4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z +5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0 +hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID +AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX +SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l +VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf +peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF +Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW ++qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Root CA" +# Serial: 1 +# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f +# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 +# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET 
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G1 O=Certplus +# Subject: CN=Certplus Root CA G1 O=Certplus +# Label: "Certplus Root CA G1" +# Serial: 1491911565779898356709731176965615564637713 +# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42 +# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66 +# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a +iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt +6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP +0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f +6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE +EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN +1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc +h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT +mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV +4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO +WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd +Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq +hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh +66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7 +/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS +S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j +2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R +Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr +RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy 
+6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV +V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5 +g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl +++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo= +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G2 O=Certplus +# Subject: CN=Certplus Root CA G2 O=Certplus +# Label: "Certplus Root CA G2" +# Serial: 1492087096131536844209563509228951875861589 +# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31 +# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a +# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17 +-----BEGIN CERTIFICATE----- +MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat +93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x +Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj +FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG +SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch +p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal +U5ORGpOucGpnutee5WEaXw== +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust +# Subject: CN=OpenTrust Root CA G1 O=OpenTrust +# Label: "OpenTrust Root CA G1" +# Serial: 1492036577811947013770400127034825178844775 +# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da +# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e +# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b +wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX +/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0 +77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP +uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx +p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx +Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2 +TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W +G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw +vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY +EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1 +2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw +DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E +PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf +gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS +FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0 +V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P 
+XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I +i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t +TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91 +09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky +Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ +AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj +1oxx +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust +# Subject: CN=OpenTrust Root CA G2 O=OpenTrust +# Label: "OpenTrust Root CA G2" +# Serial: 1492012448042702096986875987676935573415441 +# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb +# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b +# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh +/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e +CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6 +1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE +FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS +gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X +G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy +YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH +vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4 +t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/ +gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3 +5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w +DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz +Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0 +nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT +RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT +wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2 +t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa +TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2 +o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU +3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA +iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f +WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM +S1IK +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust +# Subject: CN=OpenTrust Root CA G3 O=OpenTrust +# Label: "OpenTrust Root CA G3" +# Serial: 1492104908271485653071219941864171170455615 +# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24 +# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6 +# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92 +-----BEGIN CERTIFICATE----- +MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx +CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U +cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow 
+QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl +blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm +3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d +oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5 +DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK +BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q +j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx +4nxp5V2a+EEfOzmTk51V6s2N8fvB +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx 
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj +ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- 
+ +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: 
"Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Label: "LuxTrust Global Root 2" +# Serial: 59914338225734147123941058376788110305822489521 +# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c +# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f +# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5 +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m +j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ 
+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr +N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. 
+# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- diff --git a/py311/lib/python3.11/site-packages/botocore/client.py b/py311/lib/python3.11/site-packages/botocore/client.py new file mode 100644 index 0000000000000000000000000000000000000000..87eb7f1d7096130de58d08a6255a3e4c9bb154e0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/client.py @@ -0,0 +1,1440 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). 
You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import logging + +from botocore import ( + UNSIGNED, # noqa: F401 + waiter, + xform_name, +) +from botocore.args import ClientArgsCreator +from botocore.auth import AUTH_TYPE_MAPS, resolve_auth_type +from botocore.awsrequest import prepare_request_dict +from botocore.compress import maybe_compress_request +from botocore.config import Config +from botocore.context import with_current_context +from botocore.credentials import RefreshableCredentials +from botocore.discovery import ( + EndpointDiscoveryHandler, + EndpointDiscoveryManager, + block_endpoint_discovery_required_operations, +) +from botocore.docs.docstring import ClientMethodDocstring, PaginatorDocstring +from botocore.exceptions import ( + ClientError, # noqa: F401 + DataNotFoundError, + InvalidEndpointDiscoveryConfigurationError, + OperationNotPageableError, + UnknownServiceError, + UnknownSignatureVersionError, +) +from botocore.history import get_global_history_recorder +from botocore.hooks import first_non_none_response +from botocore.httpchecksum import ( + apply_request_checksum, + resolve_checksum_context, +) +from botocore.model import ServiceModel +from botocore.paginate import Paginator +from botocore.retries import adaptive, standard +from botocore.useragent import UserAgentString, register_feature_id +from botocore.utils import ( + CachedProperty, + EventbridgeSignerSetter, + S3ArnParamHandler, # noqa: F401 + S3ControlArnParamHandler, # noqa: F401 + S3ControlArnParamHandlerv2, + S3ControlEndpointSetter, # noqa: F401 + S3EndpointSetter, # noqa: F401 + S3ExpressIdentityResolver, + S3RegionRedirector, # noqa: F401 + S3RegionRedirectorv2, + ensure_boolean, + get_service_module_name, +) + +logger = logging.getLogger(__name__) +history_recorder = get_global_history_recorder() + + +class ClientCreator: + """Creates client objects for a service.""" + + def __init__( + self, + loader, + endpoint_resolver, + user_agent, + event_emitter, + retry_handler_factory, + retry_config_translator, + response_parser_factory=None, + exceptions_factory=None, + config_store=None, + user_agent_creator=None, + auth_token_resolver=None, + ): + self._loader = loader + self._endpoint_resolver = endpoint_resolver + self._user_agent = user_agent + self._event_emitter = event_emitter + self._retry_handler_factory = retry_handler_factory + self._retry_config_translator = retry_config_translator + self._response_parser_factory = response_parser_factory + self._exceptions_factory = exceptions_factory + # TODO: Migrate things away from scoped_config in favor of the + # config_store. The config store can pull things from both the scoped + # config and environment variables (and potentially more in the + # future). 
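+ # Example (illustrative, not part of botocore): ClientCreator is an
+ # internal class; application code normally reaches it indirectly
+ # through a Session, which wires up the dependencies listed above:
+ #
+ #   import botocore.session
+ #   session = botocore.session.get_session()
+ #   client = session.create_client("s3", region_name="us-east-1")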
+ self._config_store = config_store + self._user_agent_creator = user_agent_creator + self._auth_token_resolver = auth_token_resolver + + def create_client( + self, + service_name, + region_name, + is_secure=True, + endpoint_url=None, + verify=None, + credentials=None, + scoped_config=None, + api_version=None, + client_config=None, + auth_token=None, + ): + responses = self._event_emitter.emit( + 'choose-service-name', service_name=service_name + ) + service_name = first_non_none_response(responses, default=service_name) + service_model = self._load_service_model(service_name, api_version) + try: + endpoints_ruleset_data = self._load_service_endpoints_ruleset( + service_name, api_version + ) + partition_data = self._loader.load_data('partitions') + except UnknownServiceError: + endpoints_ruleset_data = None + partition_data = None + logger.info( + 'No endpoints ruleset found for service %s, falling back to ' + 'legacy endpoint routing.', + service_name, + ) + + cls = self._create_client_class(service_name, service_model) + region_name, client_config = self._normalize_fips_region( + region_name, client_config + ) + if auth := service_model.metadata.get('auth'): + service_signature_version = resolve_auth_type(auth) + else: + service_signature_version = service_model.metadata.get( + 'signatureVersion' + ) + endpoint_bridge = ClientEndpointBridge( + self._endpoint_resolver, + scoped_config, + client_config, + service_signing_name=service_model.metadata.get('signingName'), + config_store=self._config_store, + service_signature_version=service_signature_version, + ) + if token := self._evaluate_client_specific_token( + service_model.signing_name + ): + auth_token = token + client_args = self._get_client_args( + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + auth_token, + endpoints_ruleset_data, + partition_data, + ) + service_client = cls(**client_args) + self._register_retries(service_client) + self._register_s3_events( + client=service_client, + endpoint_bridge=None, + endpoint_url=None, + client_config=client_config, + scoped_config=scoped_config, + ) + self._register_s3express_events(client=service_client) + self._register_s3_control_events(client=service_client) + self._register_importexport_events(client=service_client) + self._register_endpoint_discovery( + service_client, endpoint_url, client_config + ) + return service_client + + def create_client_class(self, service_name, api_version=None): + service_model = self._load_service_model(service_name, api_version) + return self._create_client_class(service_name, service_model) + + def _create_client_class(self, service_name, service_model): + class_attributes = self._create_methods(service_model) + py_name_to_operation_name = self._create_name_mapping(service_model) + class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name + bases = [BaseClient] + service_id = service_model.service_id.hyphenize() + self._event_emitter.emit( + f'creating-client-class.{service_id}', + class_attributes=class_attributes, + base_classes=bases, + ) + class_name = get_service_module_name(service_model) + cls = type(str(class_name), tuple(bases), class_attributes) + return cls + + def _normalize_fips_region(self, region_name, client_config): + if region_name is not None: + normalized_region_name = region_name.replace('fips-', '').replace( + '-fips', '' + ) + # If region has been transformed then set flag + if normalized_region_name != region_name: + 
config_use_fips_endpoint = Config(use_fips_endpoint=True)
+ if client_config:
+ # Keeping endpoint setting client specific
+ client_config = client_config.merge(
+ config_use_fips_endpoint
+ )
+ else:
+ client_config = config_use_fips_endpoint
+ logger.warning(
+ 'transforming region from %s to %s and setting '
+ 'use_fips_endpoint to true. client should not '
+ 'be configured with a fips pseudo region.',
+ region_name,
+ normalized_region_name,
+ )
+ region_name = normalized_region_name
+ return region_name, client_config
+
+ def _load_service_model(self, service_name, api_version=None):
+ json_model = self._loader.load_service_model(
+ service_name, 'service-2', api_version=api_version
+ )
+ service_model = ServiceModel(json_model, service_name=service_name)
+ return service_model
+
+ def _load_service_endpoints_ruleset(self, service_name, api_version=None):
+ return self._loader.load_service_model(
+ service_name, 'endpoint-rule-set-1', api_version=api_version
+ )
+
+ def _register_retries(self, client):
+ retry_mode = client.meta.config.retries['mode']
+ if retry_mode == 'standard':
+ self._register_v2_standard_retries(client)
+ elif retry_mode == 'adaptive':
+ self._register_v2_standard_retries(client)
+ self._register_v2_adaptive_retries(client)
+ elif retry_mode == 'legacy':
+ self._register_legacy_retries(client)
+ else:
+ return
+ register_feature_id(f'RETRY_MODE_{retry_mode.upper()}')
+
+ def _register_v2_standard_retries(self, client):
+ max_attempts = client.meta.config.retries.get('total_max_attempts')
+ kwargs = {'client': client}
+ if max_attempts is not None:
+ kwargs['max_attempts'] = max_attempts
+ standard.register_retry_handler(**kwargs)
+
+ def _register_v2_adaptive_retries(self, client):
+ adaptive.register_retry_handler(client)
+
+ def _register_legacy_retries(self, client):
+ endpoint_prefix = client.meta.service_model.endpoint_prefix
+ service_id = client.meta.service_model.service_id
+ service_event_name = service_id.hyphenize()
+
+ # First, we load the entire retry config for all services,
+ # then pull out just the information we need.
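+ # Illustrative shape of that data (abridged editorial sketch, not
+ # the actual botocore/data/_retry.json file):
+ #
+ #   {
+ #       "definitions": {"throttling": {...}},
+ #       "retry": {
+ #           "__default__": {
+ #               "max_attempts": 5,
+ #               "delay": {"type": "exponential", "base": "rand",
+ #                         "growth_factor": 2},
+ #               "policies": {...}
+ #           }
+ #       }
+ #   }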
+ original_config = self._loader.load_data('_retry') + if not original_config: + return + + retries = self._transform_legacy_retries(client.meta.config.retries) + retry_config = self._retry_config_translator.build_retry_config( + endpoint_prefix, + original_config.get('retry', {}), + original_config.get('definitions', {}), + retries, + ) + + logger.debug( + "Registering retry handlers for service: %s", + client.meta.service_model.service_name, + ) + handler = self._retry_handler_factory.create_retry_handler( + retry_config, endpoint_prefix + ) + unique_id = f'retry-config-{service_event_name}' + client.meta.events.register( + f"needs-retry.{service_event_name}", handler, unique_id=unique_id + ) + + def _transform_legacy_retries(self, retries): + if retries is None: + return + copied_args = retries.copy() + if 'total_max_attempts' in retries: + copied_args = retries.copy() + copied_args['max_attempts'] = ( + copied_args.pop('total_max_attempts') - 1 + ) + return copied_args + + def _get_retry_mode(self, client, config_store): + client_retries = client.meta.config.retries + if ( + client_retries is not None + and client_retries.get('mode') is not None + ): + return client_retries['mode'] + return config_store.get_config_variable('retry_mode') or 'legacy' + + def _register_endpoint_discovery(self, client, endpoint_url, config): + if endpoint_url is not None: + # Don't register any handlers in the case of a custom endpoint url + return + # Only attach handlers if the service supports discovery + if client.meta.service_model.endpoint_discovery_operation is None: + return + events = client.meta.events + service_id = client.meta.service_model.service_id.hyphenize() + enabled = False + if config and config.endpoint_discovery_enabled is not None: + enabled = config.endpoint_discovery_enabled + elif self._config_store: + enabled = self._config_store.get_config_variable( + 'endpoint_discovery_enabled' + ) + + enabled = self._normalize_endpoint_discovery_config(enabled) + if enabled and self._requires_endpoint_discovery(client, enabled): + discover = enabled is True + manager = EndpointDiscoveryManager( + client, always_discover=discover + ) + handler = EndpointDiscoveryHandler(manager) + handler.register(events, service_id) + else: + events.register( + 'before-parameter-build', + block_endpoint_discovery_required_operations, + ) + + def _normalize_endpoint_discovery_config(self, enabled): + """Config must either be a boolean-string or string-literal 'auto'""" + if isinstance(enabled, str): + enabled = enabled.lower().strip() + if enabled == 'auto': + return enabled + elif enabled in ('true', 'false'): + return ensure_boolean(enabled) + elif isinstance(enabled, bool): + return enabled + + raise InvalidEndpointDiscoveryConfigurationError(config_value=enabled) + + def _requires_endpoint_discovery(self, client, enabled): + if enabled == "auto": + return client.meta.service_model.endpoint_discovery_required + return enabled + + def _register_eventbridge_events( + self, client, endpoint_bridge, endpoint_url + ): + if client.meta.service_model.service_name != 'events': + return + EventbridgeSignerSetter( + endpoint_resolver=self._endpoint_resolver, + region=client.meta.region_name, + endpoint_url=endpoint_url, + ).register(client.meta.events) + + def _register_s3express_events( + self, + client, + endpoint_bridge=None, + endpoint_url=None, + client_config=None, + scoped_config=None, + ): + if client.meta.service_model.service_name != 's3': + return + S3ExpressIdentityResolver(client, 
RefreshableCredentials).register() + + def _register_s3_events( + self, + client, + endpoint_bridge, + endpoint_url, + client_config, + scoped_config, + ): + if client.meta.service_model.service_name != 's3': + return + S3RegionRedirectorv2(None, client).register() + self._set_s3_presign_signature_version( + client.meta, client_config, scoped_config + ) + client.meta.events.register( + 'before-parameter-build.s3', self._inject_s3_input_parameters + ) + + def _register_s3_control_events( + self, + client, + endpoint_bridge=None, + endpoint_url=None, + client_config=None, + scoped_config=None, + ): + if client.meta.service_model.service_name != 's3control': + return + S3ControlArnParamHandlerv2().register(client.meta.events) + + def _set_s3_presign_signature_version( + self, client_meta, client_config, scoped_config + ): + # This will return the manually configured signature version, or None + # if none was manually set. If a customer manually sets the signature + # version, we always want to use what they set. + provided_signature_version = _get_configured_signature_version( + 's3', client_config, scoped_config + ) + if provided_signature_version is not None: + return + + # Check to see if the region is a region that we know about. If we + # don't know about a region, then we can safely assume it's a new + # region that is sigv4 only, since all new S3 regions only allow sigv4. + # The only exception is aws-global. This is a pseudo-region for the + # global endpoint, we should respect the signature versions it + # supports, which includes v2. + regions = self._endpoint_resolver.get_available_endpoints( + 's3', client_meta.partition + ) + if ( + client_meta.region_name != 'aws-global' + and client_meta.region_name not in regions + ): + return + + # If it is a region we know about, we want to default to sigv2, so here + # we check to see if it is available. + endpoint = self._endpoint_resolver.construct_endpoint( + 's3', client_meta.region_name + ) + signature_versions = endpoint['signatureVersions'] + if 's3' not in signature_versions: + return + + # We now know that we're in a known region that supports sigv2 and + # the customer hasn't set a signature version so we default the + # signature version to sigv2. + client_meta.events.register( + 'choose-signer.s3', self._default_s3_presign_to_sigv2 + ) + + def _inject_s3_input_parameters(self, params, context, **kwargs): + context['input_params'] = {} + inject_parameters = ('Bucket', 'Delete', 'Key', 'Prefix') + for inject_parameter in inject_parameters: + if inject_parameter in params: + context['input_params'][inject_parameter] = params[ + inject_parameter + ] + + def _default_s3_presign_to_sigv2(self, signature_version, **kwargs): + """ + Returns the 's3' (sigv2) signer if presigning an s3 request. This is + intended to be used to set the default signature version for the signer + to sigv2. Situations where an asymmetric signature is required are the + exception, for example MRAP needs v4a. + + :type signature_version: str + :param signature_version: The current client signature version. + + :type signing_name: str + :param signing_name: The signing name of the service. 
+
+ :return: 's3' if the request is an s3 presign request, None otherwise
+ """
+ if signature_version.startswith('v4a'):
+ return
+
+ if signature_version.startswith('v4-s3express'):
+ return signature_version
+
+ for suffix in ['-query', '-presign-post']:
+ if signature_version.endswith(suffix):
+ return f's3{suffix}'
+
+ def _register_importexport_events(
+ self,
+ client,
+ endpoint_bridge=None,
+ endpoint_url=None,
+ client_config=None,
+ scoped_config=None,
+ ):
+ if client.meta.service_model.service_name != 'importexport':
+ return
+ self._set_importexport_signature_version(
+ client.meta, client_config, scoped_config
+ )
+
+ def _set_importexport_signature_version(
+ self, client_meta, client_config, scoped_config
+ ):
+ # This will return the manually configured signature version, or None
+ # if none was manually set. If a customer manually sets the signature
+ # version, we always want to use what they set.
+ configured_signature_version = _get_configured_signature_version(
+ 'importexport', client_config, scoped_config
+ )
+ if configured_signature_version is not None:
+ return
+
+ # importexport has a modeled signatureVersion of v2, but we
+ # previously switched to v4 via endpoints.json before endpoint rulesets.
+ # Override the model's signatureVersion for backwards compatibility.
+ client_meta.events.register(
+ 'choose-signer.importexport', self._default_signer_to_sigv4
+ )
+
+ def _default_signer_to_sigv4(self, signature_version, **kwargs):
+ return 'v4'
+
+ def _get_client_args(
+ self,
+ service_model,
+ region_name,
+ is_secure,
+ endpoint_url,
+ verify,
+ credentials,
+ scoped_config,
+ client_config,
+ endpoint_bridge,
+ auth_token,
+ endpoints_ruleset_data,
+ partition_data,
+ ):
+ args_creator = ClientArgsCreator(
+ self._event_emitter,
+ self._user_agent,
+ self._response_parser_factory,
+ self._loader,
+ self._exceptions_factory,
+ config_store=self._config_store,
+ user_agent_creator=self._user_agent_creator,
+ )
+ return args_creator.get_client_args(
+ service_model,
+ region_name,
+ is_secure,
+ endpoint_url,
+ verify,
+ credentials,
+ scoped_config,
+ client_config,
+ endpoint_bridge,
+ auth_token,
+ endpoints_ruleset_data,
+ partition_data,
+ )
+
+ def _create_methods(self, service_model):
+ op_dict = {}
+ for operation_name in service_model.operation_names:
+ py_operation_name = xform_name(operation_name)
+ op_dict[py_operation_name] = self._create_api_method(
+ py_operation_name, operation_name, service_model
+ )
+ return op_dict
+
+ def _create_name_mapping(self, service_model):
+ # py_name -> OperationName, for every operation available
+ # for a service.
+ mapping = {}
+ for operation_name in service_model.operation_names:
+ py_operation_name = xform_name(operation_name)
+ mapping[py_operation_name] = operation_name
+ return mapping
+
+ def _create_api_method(
+ self, py_operation_name, operation_name, service_model
+ ):
+ def _api_call(self, *args, **kwargs):
+ # We're accepting *args so that we can give a more helpful
+ # error message than TypeError: _api_call takes exactly
+ # 1 argument.
+ if args:
+ raise TypeError(
+ f"{py_operation_name}() only accepts keyword arguments."
+ )
+ # The "self" in this scope is referring to the BaseClient.
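+ # Illustrative mapping (editorial sketch): for the modeled operation
+ # "ListObjects", xform_name yields "list_objects", so a call such as
+ # client.list_objects(Bucket="mybucket") arrives here and becomes
+ # self._make_api_call("ListObjects", {"Bucket": "mybucket"}).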
+ return self._make_api_call(operation_name, kwargs) + + _api_call.__name__ = str(py_operation_name) + + # Add the docstring to the client method + operation_model = service_model.operation_model(operation_name) + docstring = ClientMethodDocstring( + operation_model=operation_model, + method_name=operation_name, + event_emitter=self._event_emitter, + method_description=operation_model.documentation, + example_prefix=f'response = client.{py_operation_name}', + include_signature=False, + ) + _api_call.__doc__ = docstring + return _api_call + + def _evaluate_client_specific_token(self, signing_name): + # Resolves an auth_token for the given signing_name. + # Returns None if no resolver is set or if resolution fails. + resolver = self._auth_token_resolver + if not resolver or not signing_name: + return None + + return resolver(signing_name=signing_name) + + +class ClientEndpointBridge: + """Bridges endpoint data and client creation + + This class handles taking out the relevant arguments from the endpoint + resolver and determining which values to use, taking into account any + client configuration options and scope configuration options. + + This class also handles determining what, if any, region to use if no + explicit region setting is provided. For example, Amazon S3 client will + utilize "us-east-1" by default if no region can be resolved.""" + + DEFAULT_ENDPOINT = '{service}.{region}.amazonaws.com' + _DUALSTACK_CUSTOMIZED_SERVICES = ['s3', 's3-control'] + + def __init__( + self, + endpoint_resolver, + scoped_config=None, + client_config=None, + default_endpoint=None, + service_signing_name=None, + config_store=None, + service_signature_version=None, + ): + self.service_signing_name = service_signing_name + self.endpoint_resolver = endpoint_resolver + self.scoped_config = scoped_config + self.client_config = client_config + self.default_endpoint = default_endpoint or self.DEFAULT_ENDPOINT + self.config_store = config_store + self.service_signature_version = service_signature_version + + def resolve( + self, service_name, region_name=None, endpoint_url=None, is_secure=True + ): + region_name = self._check_default_region(service_name, region_name) + use_dualstack_endpoint = self._resolve_use_dualstack_endpoint( + service_name + ) + use_fips_endpoint = self._resolve_endpoint_variant_config_var( + 'use_fips_endpoint' + ) + resolved = self.endpoint_resolver.construct_endpoint( + service_name, + region_name, + use_dualstack_endpoint=use_dualstack_endpoint, + use_fips_endpoint=use_fips_endpoint, + ) + + # If we can't resolve the region, we'll attempt to get a global + # endpoint for non-regionalized services (iam, route53, etc) + if not resolved: + # TODO: fallback partition_name should be configurable in the + # future for users to define as needed. + resolved = self.endpoint_resolver.construct_endpoint( + service_name, + region_name, + partition_name='aws', + use_dualstack_endpoint=use_dualstack_endpoint, + use_fips_endpoint=use_fips_endpoint, + ) + + if resolved: + return self._create_endpoint( + resolved, service_name, region_name, endpoint_url, is_secure + ) + else: + return self._assume_endpoint( + service_name, region_name, endpoint_url, is_secure + ) + + def resolver_uses_builtin_data(self): + return self.endpoint_resolver.uses_builtin_data + + def _check_default_region(self, service_name, region_name): + if region_name is not None: + return region_name + # Use the client_config region if no explicit region was provided. 
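+ # Illustrative fallback (sketch): with no region argument but
+ # Config(region_name="eu-west-1") on the client config, this method
+ # returns "eu-west-1"; with neither set, it implicitly returns None
+ # and the caller decides how to proceed.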
+ if self.client_config and self.client_config.region_name is not None: + return self.client_config.region_name + + def _create_endpoint( + self, resolved, service_name, region_name, endpoint_url, is_secure + ): + region_name, signing_region = self._pick_region_values( + resolved, region_name, endpoint_url + ) + if endpoint_url is None: + endpoint_url = self._make_url( + resolved.get('hostname'), + is_secure, + resolved.get('protocols', []), + ) + signature_version = self._resolve_signature_version( + service_name, resolved + ) + signing_name = self._resolve_signing_name(service_name, resolved) + return self._create_result( + service_name=service_name, + region_name=region_name, + signing_region=signing_region, + signing_name=signing_name, + endpoint_url=endpoint_url, + metadata=resolved, + signature_version=signature_version, + ) + + def _resolve_endpoint_variant_config_var(self, config_var): + client_config = self.client_config + config_val = False + + # Client configuration arg has precedence + if client_config and getattr(client_config, config_var) is not None: + return getattr(client_config, config_var) + elif self.config_store is not None: + # Check config store + config_val = self.config_store.get_config_variable(config_var) + return config_val + + def _resolve_use_dualstack_endpoint(self, service_name): + s3_dualstack_mode = self._is_s3_dualstack_mode(service_name) + if s3_dualstack_mode is not None: + return s3_dualstack_mode + return self._resolve_endpoint_variant_config_var( + 'use_dualstack_endpoint' + ) + + def _is_s3_dualstack_mode(self, service_name): + if service_name not in self._DUALSTACK_CUSTOMIZED_SERVICES: + return None + # TODO: This normalization logic is duplicated from the + # ClientArgsCreator class. Consolidate everything to + # ClientArgsCreator. _resolve_signature_version also has similarly + # duplicated logic. + client_config = self.client_config + if ( + client_config is not None + and client_config.s3 is not None + and 'use_dualstack_endpoint' in client_config.s3 + ): + # Client config trumps scoped config. + return client_config.s3['use_dualstack_endpoint'] + if self.scoped_config is not None: + enabled = self.scoped_config.get('s3', {}).get( + 'use_dualstack_endpoint' + ) + if enabled in [True, 'True', 'true']: + return True + + def _assume_endpoint( + self, service_name, region_name, endpoint_url, is_secure + ): + if endpoint_url is None: + # Expand the default hostname URI template. + hostname = self.default_endpoint.format( + service=service_name, region=region_name + ) + endpoint_url = self._make_url( + hostname, is_secure, ['http', 'https'] + ) + logger.debug( + 'Assuming an endpoint for %s, %s: %s', + service_name, + region_name, + endpoint_url, + ) + # We still want to allow the user to provide an explicit version. 
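+ # Illustrative expansion (sketch): with no resolver data, the
+ # DEFAULT_ENDPOINT template '{service}.{region}.amazonaws.com'
+ # turns ("sqs", "us-east-1") into
+ # 'https://sqs.us-east-1.amazonaws.com', and the signature version
+ # is then resolved as if the endpoint had advertised ['v4'].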
+ signature_version = self._resolve_signature_version(
+ service_name, {'signatureVersions': ['v4']}
+ )
+ signing_name = self._resolve_signing_name(service_name, resolved={})
+ return self._create_result(
+ service_name=service_name,
+ region_name=region_name,
+ signing_region=region_name,
+ signing_name=signing_name,
+ signature_version=signature_version,
+ endpoint_url=endpoint_url,
+ metadata={},
+ )
+
+ def _create_result(
+ self,
+ service_name,
+ region_name,
+ signing_region,
+ signing_name,
+ endpoint_url,
+ signature_version,
+ metadata,
+ ):
+ return {
+ 'service_name': service_name,
+ 'region_name': region_name,
+ 'signing_region': signing_region,
+ 'signing_name': signing_name,
+ 'endpoint_url': endpoint_url,
+ 'signature_version': signature_version,
+ 'metadata': metadata,
+ }
+
+ def _make_url(self, hostname, is_secure, supported_protocols):
+ if is_secure and 'https' in supported_protocols:
+ scheme = 'https'
+ else:
+ scheme = 'http'
+ return f'{scheme}://{hostname}'
+
+ def _resolve_signing_name(self, service_name, resolved):
+ # CredentialScope overrides everything else.
+ if (
+ 'credentialScope' in resolved
+ and 'service' in resolved['credentialScope']
+ ):
+ return resolved['credentialScope']['service']
+ # Use the signingName from the model if present.
+ if self.service_signing_name:
+ return self.service_signing_name
+ # Just assume it is the same as the service name.
+ return service_name
+
+ def _pick_region_values(self, resolved, region_name, endpoint_url):
+ signing_region = region_name
+ if endpoint_url is None:
+ # Do not use the region name or signing name from the resolved
+ # endpoint if the user explicitly provides an endpoint_url. This
+ # would happen if we resolve to an endpoint where the service has
+ # a "defaults" section that overrides all endpoints with a single
+ # hostname and credentialScope. This has been the case historically
+ # for how STS has worked. The only way to resolve an STS endpoint
+ # was to provide a region_name and an endpoint_url. In that case,
+ # we would still resolve an endpoint, but we would not use the
+ # resolved endpointName or signingRegion because we want to allow
+ # custom endpoints.
+ region_name = resolved['endpointName']
+ signing_region = region_name
+ if (
+ 'credentialScope' in resolved
+ and 'region' in resolved['credentialScope']
+ ):
+ signing_region = resolved['credentialScope']['region']
+ return region_name, signing_region
+
+ def _resolve_signature_version(self, service_name, resolved):
+ configured_version = _get_configured_signature_version(
+ service_name, self.client_config, self.scoped_config
+ )
+ if configured_version is not None:
+ return configured_version
+
+ # These have since added the "auth" key to the service model
+ # with "aws.auth#sigv4", but preserve existing behavior from
+ # when we preferred endpoints.json over the service models
+ if service_name in ('s3', 's3-control'):
+ return 's3v4'
+
+ if self.service_signature_version is not None:
+ # Prefer the service model
+ potential_versions = [self.service_signature_version]
+ else:
+ # Fall back to endpoints.json to preserve existing behavior, which
+ # may be useful for users who have custom service models
+ potential_versions = resolved.get('signatureVersions', [])
+ # This was added for the V2 -> V4 transition,
+ # for services that added V4 after V2 in endpoints.json
+ if 'v4' in potential_versions:
+ return 'v4'
+ # Now just iterate over the signature versions in order until we
+ # find the first one that is known to Botocore.
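+ # Resolution order illustrated (editorial sketch): an explicit
+ # client/scoped config always wins; s3 and s3-control pin 's3v4';
+ # otherwise the service model's version is preferred, then the
+ # endpoints.json list with 'v4' taking precedence, so e.g.
+ # ['v2', 'v4'] resolves to 'v4'.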
+ for known in potential_versions: + if known in AUTH_TYPE_MAPS: + return known + + raise UnknownSignatureVersionError( + signature_version=potential_versions + ) + + +class BaseClient: + # This is actually reassigned with the py->op_name mapping + # when the client creator creates the subclass. This value is used + # because calls such as client.get_paginator('list_objects') use the + # snake_case name, but we need to know the ListObjects form. + # xform_name() does the ListObjects->list_objects conversion, but + # we need the reverse mapping here. + _PY_TO_OP_NAME = {} + + def __init__( + self, + serializer, + endpoint, + response_parser, + event_emitter, + request_signer, + service_model, + loader, + client_config, + partition, + exceptions_factory, + endpoint_ruleset_resolver=None, + user_agent_creator=None, + ): + self._serializer = serializer + self._endpoint = endpoint + self._ruleset_resolver = endpoint_ruleset_resolver + self._response_parser = response_parser + self._request_signer = request_signer + self._cache = {} + self._loader = loader + self._client_config = client_config + self.meta = ClientMeta( + event_emitter, + self._client_config, + endpoint.host, + service_model, + self._PY_TO_OP_NAME, + partition, + ) + self._exceptions_factory = exceptions_factory + self._exceptions = None + self._user_agent_creator = user_agent_creator + if self._user_agent_creator is None: + self._user_agent_creator = ( + UserAgentString.from_environment().with_client_config( + self._client_config + ) + ) + self._register_handlers() + + def __getattr__(self, item): + service_id = self._service_model.service_id.hyphenize() + event_name = f'getattr.{service_id}.{item}' + + handler, event_response = self.meta.events.emit_until_response( + event_name, client=self + ) + + if event_response is not None: + return event_response + + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{item}'" + ) + + def close(self): + """Closes underlying endpoint connections.""" + self._endpoint.close() + + def _register_handlers(self): + # Register the handler required to sign requests. + service_id = self.meta.service_model.service_id.hyphenize() + self.meta.events.register( + f"request-created.{service_id}", self._request_signer.handler + ) + # Rebuild user agent string right before request is sent + # to ensure all registered features are included. 
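+ # Illustrative event names (sketch): for S3 both handlers attach to
+ # "request-created.s3"; the request signer runs first, and the
+ # user-agent rebuild runs last via register_last().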
+ self.meta.events.register_last( + f"request-created.{service_id}", + self._user_agent_creator.rebuild_and_replace_user_agent_handler, + ) + + @property + def _service_model(self): + return self.meta.service_model + + @with_current_context() + def _make_api_call(self, operation_name, api_params): + operation_model = self._service_model.operation_model(operation_name) + service_name = self._service_model.service_name + history_recorder.record( + 'API_CALL', + { + 'service': service_name, + 'operation': operation_name, + 'params': api_params, + }, + ) + if operation_model.deprecated: + logger.debug( + 'Warning: %s.%s() is deprecated', service_name, operation_name + ) + request_context = { + 'client_region': self.meta.region_name, + 'client_config': self.meta.config, + 'has_streaming_input': operation_model.has_streaming_input, + 'auth_type': operation_model.resolved_auth_type, + 'unsigned_payload': operation_model.unsigned_payload, + 'auth_options': self._service_model.metadata.get('auth'), + } + + api_params = self._emit_api_params( + api_params=api_params, + operation_model=operation_model, + context=request_context, + ) + ( + endpoint_url, + additional_headers, + properties, + ) = self._resolve_endpoint_ruleset( + operation_model, api_params, request_context + ) + if properties: + # Pass arbitrary endpoint info with the Request + # for use during construction. + request_context['endpoint_properties'] = properties + request_dict = self._convert_to_request_dict( + api_params=api_params, + operation_model=operation_model, + endpoint_url=endpoint_url, + context=request_context, + headers=additional_headers, + ) + resolve_checksum_context(request_dict, operation_model, api_params) + + service_id = self._service_model.service_id.hyphenize() + handler, event_response = self.meta.events.emit_until_response( + f'before-call.{service_id}.{operation_name}', + model=operation_model, + params=request_dict, + request_signer=self._request_signer, + context=request_context, + ) + + if event_response is not None: + http, parsed_response = event_response + else: + maybe_compress_request( + self.meta.config, request_dict, operation_model + ) + apply_request_checksum(request_dict) + http, parsed_response = self._make_request( + operation_model, request_dict, request_context + ) + + self.meta.events.emit( + f'after-call.{service_id}.{operation_name}', + http_response=http, + parsed=parsed_response, + model=operation_model, + context=request_context, + ) + + if http.status_code >= 300: + error_info = parsed_response.get("Error", {}) + error_code = request_context.get( + 'error_code_override' + ) or error_info.get("Code") + error_class = self.exceptions.from_code(error_code) + raise error_class(parsed_response, operation_name) + else: + return parsed_response + + def _make_request(self, operation_model, request_dict, request_context): + try: + return self._endpoint.make_request(operation_model, request_dict) + except Exception as e: + self.meta.events.emit( + f'after-call-error.{self._service_model.service_id.hyphenize()}.{operation_model.name}', + exception=e, + context=request_context, + ) + raise + + def _convert_to_request_dict( + self, + api_params, + operation_model, + endpoint_url, + context=None, + headers=None, + set_user_agent_header=True, + ): + request_dict = self._serializer.serialize_to_request( + api_params, operation_model + ) + if not self._client_config.inject_host_prefix: + request_dict.pop('host_prefix', None) + if headers is not None: + request_dict['headers'].update(headers) + if 
set_user_agent_header: + user_agent = self._user_agent_creator.to_string() + else: + user_agent = None + prepare_request_dict( + request_dict, + endpoint_url=endpoint_url, + user_agent=user_agent, + context=context, + ) + return request_dict + + def _emit_api_params(self, api_params, operation_model, context): + # Given the API params provided by the user and the operation_model + # we can serialize the request to a request_dict. + operation_name = operation_model.name + + # Emit an event that allows users to modify the parameters at the + # beginning of the method. It allows handlers to modify existing + # parameters or return a new set of parameters to use. + service_id = self._service_model.service_id.hyphenize() + responses = self.meta.events.emit( + f'provide-client-params.{service_id}.{operation_name}', + params=api_params, + model=operation_model, + context=context, + ) + api_params = first_non_none_response(responses, default=api_params) + + self.meta.events.emit( + f'before-parameter-build.{service_id}.{operation_name}', + params=api_params, + model=operation_model, + context=context, + ) + return api_params + + def _resolve_endpoint_ruleset( + self, + operation_model, + params, + request_context, + ignore_signing_region=False, + ): + """Returns endpoint URL and list of additional headers returned from + EndpointRulesetResolver for the given operation and params. If the + ruleset resolver is not available, for example because the service has + no endpoints ruleset file, the legacy endpoint resolver's value is + returned. + + Use ignore_signing_region for generating presigned URLs or any other + situation where the signing region information from the ruleset + resolver should be ignored. + + Returns tuple of URL and headers dictionary. Additionally, the + request_context dict is modified in place with any signing information + returned from the ruleset resolver. + """ + if self._ruleset_resolver is None: + endpoint_url = self.meta.endpoint_url + additional_headers = {} + endpoint_properties = {} + else: + endpoint_info = self._ruleset_resolver.construct_endpoint( + operation_model=operation_model, + call_args=params, + request_context=request_context, + ) + endpoint_url = endpoint_info.url + additional_headers = endpoint_info.headers + endpoint_properties = endpoint_info.properties + # If authSchemes is present, overwrite default auth type and + # signing context derived from service model. + auth_schemes = endpoint_info.properties.get('authSchemes') + if auth_schemes is not None: + auth_info = self._ruleset_resolver.auth_schemes_to_signing_ctx( + auth_schemes + ) + auth_type, signing_context = auth_info + request_context['auth_type'] = auth_type + if 'region' in signing_context and ignore_signing_region: + del signing_context['region'] + if 'signing' in request_context: + request_context['signing'].update(signing_context) + else: + request_context['signing'] = signing_context + + return endpoint_url, additional_headers, endpoint_properties + + def get_paginator(self, operation_name): + """Create a paginator for an operation. + + :type operation_name: string + :param operation_name: The operation name. This is the same name + as the method name on the client. For example, if the + method name is ``create_foo``, and you'd normally invoke the + operation as ``client.create_foo(**kwargs)``, if the + ``create_foo`` operation can be paginated, you can use the + call ``client.get_paginator("create_foo")``. + + :raise OperationNotPageableError: Raised if the operation is not + pageable. 
You can use the ``client.can_paginate`` method to + check if an operation is pageable. + + :rtype: ``botocore.paginate.Paginator`` + :return: A paginator object. + + """ + if not self.can_paginate(operation_name): + raise OperationNotPageableError(operation_name=operation_name) + else: + actual_operation_name = self._PY_TO_OP_NAME[operation_name] + + # Create a new paginate method that will serve as a proxy to + # the underlying Paginator.paginate method. This is needed to + # attach a docstring to the method. + def paginate(self, **kwargs): + return Paginator.paginate(self, **kwargs) + + paginator_config = self._cache['page_config'][ + actual_operation_name + ] + # Add the docstring for the paginate method. + paginate.__doc__ = PaginatorDocstring( + paginator_name=actual_operation_name, + event_emitter=self.meta.events, + service_model=self.meta.service_model, + paginator_config=paginator_config, + include_signature=False, + ) + + # Rename the paginator class based on the type of paginator. + service_module_name = get_service_module_name( + self.meta.service_model + ) + paginator_class_name = ( + f"{service_module_name}.Paginator.{actual_operation_name}" + ) + + # Create the new paginator class + documented_paginator_cls = type( + paginator_class_name, (Paginator,), {'paginate': paginate} + ) + + operation_model = self._service_model.operation_model( + actual_operation_name + ) + paginator = documented_paginator_cls( + getattr(self, operation_name), + paginator_config, + operation_model, + ) + return paginator + + def can_paginate(self, operation_name): + """Check if an operation can be paginated. + + :type operation_name: string + :param operation_name: The operation name. This is the same name + as the method name on the client. For example, if the + method name is ``create_foo``, and you'd normally invoke the + operation as ``client.create_foo(**kwargs)``, if the + ``create_foo`` operation can be paginated, you can use the + call ``client.get_paginator("create_foo")``. + + :return: ``True`` if the operation can be paginated, + ``False`` otherwise. + + """ + if 'page_config' not in self._cache: + try: + page_config = self._loader.load_service_model( + self._service_model.service_name, + 'paginators-1', + self._service_model.api_version, + )['pagination'] + self._cache['page_config'] = page_config + except DataNotFoundError: + self._cache['page_config'] = {} + actual_operation_name = self._PY_TO_OP_NAME[operation_name] + return actual_operation_name in self._cache['page_config'] + + def _get_waiter_config(self): + if 'waiter_config' not in self._cache: + try: + waiter_config = self._loader.load_service_model( + self._service_model.service_name, + 'waiters-2', + self._service_model.api_version, + ) + self._cache['waiter_config'] = waiter_config + except DataNotFoundError: + self._cache['waiter_config'] = {} + return self._cache['waiter_config'] + + def get_waiter(self, waiter_name): + """Returns an object that can wait for some condition. + + :type waiter_name: str + :param waiter_name: The name of the waiter to get. See the waiters + section of the service docs for a list of available waiters. + + :returns: The specified waiter object. 
+ :rtype: ``botocore.waiter.Waiter`` + """ + config = self._get_waiter_config() + if not config: + raise ValueError(f"Waiter does not exist: {waiter_name}") + model = waiter.WaiterModel(config) + mapping = {} + for name in model.waiter_names: + mapping[xform_name(name)] = name + if waiter_name not in mapping: + raise ValueError(f"Waiter does not exist: {waiter_name}") + + return waiter.create_waiter_with_client( + mapping[waiter_name], model, self + ) + + @CachedProperty + def waiter_names(self): + """Returns a list of all available waiters.""" + config = self._get_waiter_config() + if not config: + return [] + model = waiter.WaiterModel(config) + # Waiter configs is a dict, we just want the waiter names + # which are the keys in the dict. + return [xform_name(name) for name in model.waiter_names] + + @property + def exceptions(self): + if self._exceptions is None: + self._exceptions = self._load_exceptions() + return self._exceptions + + def _load_exceptions(self): + return self._exceptions_factory.create_client_exceptions( + self._service_model + ) + + def _get_credentials(self): + """ + This private interface is subject to abrupt breaking changes, including + removal, in any botocore release. + """ + return self._request_signer._credentials + + +class ClientMeta: + """Holds additional client methods. + + This class holds additional information for clients. It exists for + two reasons: + + * To give advanced functionality to clients + * To namespace additional client attributes from the operation + names which are mapped to methods at runtime. This avoids + ever running into collisions with operation names. + + """ + + def __init__( + self, + events, + client_config, + endpoint_url, + service_model, + method_to_api_mapping, + partition, + ): + self.events = events + self._client_config = client_config + self._endpoint_url = endpoint_url + self._service_model = service_model + self._method_to_api_mapping = method_to_api_mapping + self._partition = partition + + @property + def service_model(self): + return self._service_model + + @property + def region_name(self): + return self._client_config.region_name + + @property + def endpoint_url(self): + return self._endpoint_url + + @property + def config(self): + return self._client_config + + @property + def method_to_api_mapping(self): + return self._method_to_api_mapping + + @property + def partition(self): + return self._partition + + +def _get_configured_signature_version( + service_name, client_config, scoped_config +): + """ + Gets the manually configured signature version. + + :returns: the customer configured signature version, or None if no + signature version was configured. + """ + # Client config overrides everything. + if client_config and client_config.signature_version is not None: + return client_config.signature_version + + # Scoped config overrides picking from the endpoint metadata. + if scoped_config is not None: + # A given service may have service specific configuration in the + # config file, so we need to check there as well. 
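+ # Illustrative ~/.aws/config override (editorial sketch) that this
+ # lookup would honor for service_name == "s3":
+ #
+ #   [default]
+ #   s3 =
+ #       signature_version = s3v4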
+ service_config = scoped_config.get(service_name) + if service_config is not None and isinstance(service_config, dict): + version = service_config.get('signature_version') + if version: + logger.debug( + "Switching signature version for service %s " + "to version %s based on config file override.", + service_name, + version, + ) + return version + return None diff --git a/py311/lib/python3.11/site-packages/botocore/compat.py b/py311/lib/python3.11/site-packages/botocore/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..62a265c4c6c80a142c1950abade84f55ba5927d1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/compat.py @@ -0,0 +1,371 @@ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import copy +import datetime +import sys +import inspect +import warnings +import hashlib +from http.client import HTTPMessage +import logging +import shlex +import re +import os +from collections import OrderedDict +from collections.abc import MutableMapping +from math import floor + +from botocore.vendored import six +from botocore.exceptions import MD5UnavailableError +from dateutil.tz import tzlocal +from urllib3 import exceptions + +logger = logging.getLogger(__name__) + + +class HTTPHeaders(HTTPMessage): + pass + +from urllib.parse import ( + quote, + urlencode, + unquote, + unquote_plus, + urlparse, + urlsplit, + urlunsplit, + urljoin, + parse_qsl, + parse_qs, +) +from http.client import HTTPResponse +from io import IOBase as _IOBase +from base64 import encodebytes +from email.utils import formatdate +from itertools import zip_longest +file_type = _IOBase +zip = zip + +# In python3, unquote takes a str() object, url decodes it, +# then takes the bytestring and decodes it to utf-8. +unquote_str = unquote_plus + +def set_socket_timeout(http_response, timeout): + """Set the timeout of the socket from an HTTPResponse. + + :param http_response: An instance of ``httplib.HTTPResponse`` + + """ + http_response._fp.fp.raw._sock.settimeout(timeout) + +def accepts_kwargs(func): + return inspect.getfullargspec(func)[2] + +def ensure_unicode(s, encoding=None, errors=None): + # NOOP in Python 3, because every string is already unicode + return s + +def ensure_bytes(s, encoding='utf-8', errors='strict'): + if isinstance(s, str): + return s.encode(encoding, errors) + if isinstance(s, bytes): + return s + raise ValueError(f"Expected str or bytes, received {type(s)}.") + + +import xml.etree.ElementTree as ETree +XMLParseError = ETree.ParseError + +import json + + +def filter_ssl_warnings(): + # Ignore warnings related to SNI as it is not being used in validations. 
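+    # Note: the 'message' and 'module' arguments below are regular
+    # expressions, so only urllib3's InsecurePlatformWarning emitted from
+    # urllib3.util.ssl_ is suppressed; other SSL-related warnings still
+    # propagate.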
+ warnings.filterwarnings( + 'ignore', + message="A true SSLContext object is not available.*", + category=exceptions.InsecurePlatformWarning, + module=r".*urllib3\.util\.ssl_", + ) + + +@classmethod +def from_dict(cls, d): + new_instance = cls() + for key, value in d.items(): + new_instance[key] = value + return new_instance + + +@classmethod +def from_pairs(cls, pairs): + new_instance = cls() + for key, value in pairs: + new_instance[key] = value + return new_instance + + +HTTPHeaders.from_dict = from_dict +HTTPHeaders.from_pairs = from_pairs + + +def copy_kwargs(kwargs): + """ + This used to be a compat shim for 2.6 but is now just an alias. + """ + copy_kwargs = copy.copy(kwargs) + return copy_kwargs + + +def total_seconds(delta): + """ + Returns the total seconds in a ``datetime.timedelta``. + + This used to be a compat shim for 2.6 but is now just an alias. + + :param delta: The timedelta object + :type delta: ``datetime.timedelta`` + """ + return delta.total_seconds() + + +# Checks to see if md5 is available on this system. A given system might not +# have access to it for various reasons, such as FIPS mode being enabled. +try: + hashlib.md5(usedforsecurity=False) + MD5_AVAILABLE = True +except (AttributeError, ValueError): + MD5_AVAILABLE = False + + +def get_md5(*args, **kwargs): + """ + Attempts to get an md5 hashing object. + + :param args: Args to pass to the MD5 constructor + :param kwargs: Key word arguments to pass to the MD5 constructor + :return: An MD5 hashing object if available. If it is unavailable, None + is returned if raise_error_if_unavailable is set to False. + """ + if MD5_AVAILABLE: + return hashlib.md5(*args, **kwargs) + else: + raise MD5UnavailableError() + + +def compat_shell_split(s, platform=None): + if platform is None: + platform = sys.platform + + if platform == "win32": + return _windows_shell_split(s) + else: + return shlex.split(s) + + +def _windows_shell_split(s): + """Splits up a windows command as the built-in command parser would. + + Windows has potentially bizarre rules depending on where you look. When + spawning a process via the Windows C runtime (which is what python does + when you call popen) the rules are as follows: + + https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments + + To summarize: + + * Only space and tab are valid delimiters + * Double quotes are the only valid quotes + * Backslash is interpreted literally unless it is part of a chain that + leads up to a double quote. Then the backslashes escape the backslashes, + and if there is an odd number the final backslash escapes the quote. + + :param s: The command string to split up into parts. + :return: A list of command components. + """ + if not s: + return [] + + components = [] + buff = [] + is_quoted = False + num_backslashes = 0 + for character in s: + if character == '\\': + # We can't simply append backslashes because we don't know if + # they are being used as escape characters or not. Instead we + # keep track of how many we've encountered and handle them when + # we encounter a different character. + num_backslashes += 1 + elif character == '"': + if num_backslashes > 0: + # The backslashes are in a chain leading up to a double + # quote, so they are escaping each other. + buff.append('\\' * int(floor(num_backslashes / 2))) + remainder = num_backslashes % 2 + num_backslashes = 0 + if remainder == 1: + # The number of backslashes is uneven, so they are also + # escaping the double quote, so it needs to be added to + # the current component buffer. 
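+                    # Worked example (inputs are illustrative): in the raw
+                    # string a\\"b the two backslashes collapse to one and
+                    # the quote toggles quoting below; in a\"b the single
+                    # backslash escapes the quote, so a literal '"' is
+                    # appended here instead.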
+                    buff.append('"')
+                    continue
+
+            # We've encountered a double quote that is not escaped,
+            # so we toggle is_quoted.
+            is_quoted = not is_quoted
+
+            # If there are quotes, then we may want an empty string. To be
+            # safe, we add an empty string to the buffer so that we make
+            # sure it sticks around if there's nothing else between quotes.
+            # If there is other stuff between quotes, the empty string will
+            # disappear during the joining process.
+            buff.append('')
+        elif character in [' ', '\t'] and not is_quoted:
+            # Since the backslashes aren't leading up to a quote, we put in
+            # the exact number of backslashes.
+            if num_backslashes > 0:
+                buff.append('\\' * num_backslashes)
+                num_backslashes = 0
+
+            # Excess whitespace is ignored, so only add to the components
+            # list if there is anything in the buffer.
+            if buff:
+                components.append(''.join(buff))
+                buff = []
+        else:
+            # Since the backslashes aren't leading up to a quote, we put in
+            # the exact number of backslashes.
+            if num_backslashes > 0:
+                buff.append('\\' * num_backslashes)
+                num_backslashes = 0
+            buff.append(character)
+
+    # Quotes must be terminated.
+    if is_quoted:
+        raise ValueError(f"No closing quotation in string: {s}")
+
+    # There may be some leftover backslashes, so we need to add them in.
+    # There's no quote so we add the exact number.
+    if num_backslashes > 0:
+        buff.append('\\' * num_backslashes)
+
+    # Add the final component in if there is anything in the buffer.
+    if buff:
+        components.append(''.join(buff))
+
+    return components
+
+
+def get_tzinfo_options():
+    # Due to dateutil/dateutil#197, Windows may fail to parse times in the
+    # past with the system clock. We can alternatively fall back to
+    # tzwinlocal when this happens, which will get time info from the
+    # Windows registry.
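+    # Callers are expected to try each returned tzinfo class in turn and
+    # fall through to the next on failure, e.g. (a sketch only, with a
+    # hypothetical parse function; not the exact consumer code):
+    #
+    #     for tzinfo in get_tzinfo_options():
+    #         try:
+    #             return parse_with_tzinfo(value, tzinfo())
+    #         except (OSError, ValueError):
+    #             continue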
+ if sys.platform == 'win32': + from dateutil.tz import tzwinlocal + + return (tzlocal, tzwinlocal) + else: + return (tzlocal,) + + +# Detect if CRT is available for use +try: + import awscrt.auth + + # Allow user opt-out if needed + disabled = os.environ.get('BOTO_DISABLE_CRT', "false") + HAS_CRT = not disabled.lower() == 'true' +except ImportError: + HAS_CRT = False + + +def has_minimum_crt_version(minimum_version): + """Not intended for use outside botocore.""" + if not HAS_CRT: + return False + + crt_version_str = awscrt.__version__ + try: + crt_version_ints = map(int, crt_version_str.split(".")) + crt_version_tuple = tuple(crt_version_ints) + except (TypeError, ValueError): + return False + + return crt_version_tuple >= minimum_version + + +def get_current_datetime(remove_tzinfo=True): + """Retrieve the current timezone in UTC, with or without an explicit timezone.""" + datetime_now = datetime.datetime.now(datetime.timezone.utc) + if remove_tzinfo: + datetime_now = datetime_now.replace(tzinfo=None) + return datetime_now + + +######################################################## +# urllib3 compat backports # +######################################################## + +# Vendoring IPv6 validation regex patterns from urllib3 +# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py +IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}" +IPV4_RE = re.compile("^" + IPV4_PAT + "$") +HEX_PAT = "[0-9A-Fa-f]{1,4}" +LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT) +_subs = {"hex": HEX_PAT, "ls32": LS32_PAT} +_variations = [ + # 6( h16 ":" ) ls32 + "(?:%(hex)s:){6}%(ls32)s", + # "::" 5( h16 ":" ) ls32 + "::(?:%(hex)s:){5}%(ls32)s", + # [ h16 ] "::" 4( h16 ":" ) ls32 + "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s", + # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s", + # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s", + # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s", + # [ *4( h16 ":" ) h16 ] "::" ls32 + "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s", + # [ *5( h16 ":" ) h16 ] "::" h16 + "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s", + # [ *6( h16 ":" ) h16 ] "::" + "(?:(?:%(hex)s:){0,6}%(hex)s)?::", +] + +UNRESERVED_PAT = ( + r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +) +IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" +ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" +IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" +IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") + +# These are the characters that are stripped by post-bpo-43882 urlparse(). +UNSAFE_URL_CHARS = frozenset('\t\r\n') + +# Detect if gzip is available for use +try: + import gzip + HAS_GZIP = True +except ImportError: + HAS_GZIP = False + +# Conditional import for awscrt EC crypto functionality +if HAS_CRT and has_minimum_crt_version((0, 28, 4)): + from awscrt.crypto import EC +else: + EC = None diff --git a/py311/lib/python3.11/site-packages/botocore/compress.py b/py311/lib/python3.11/site-packages/botocore/compress.py new file mode 100644 index 0000000000000000000000000000000000000000..d3dac6f0fc2f5494001b09299922a089849a1fdf --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/compress.py @@ -0,0 +1,128 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). 
You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +""" +NOTE: All functions in this module are considered private and are +subject to abrupt breaking changes. Please do not use them directly. + +""" + +import io +import logging +from gzip import GzipFile +from gzip import compress as gzip_compress + +from botocore.compat import urlencode +from botocore.useragent import register_feature_id +from botocore.utils import determine_content_length + +logger = logging.getLogger(__name__) + + +def maybe_compress_request(config, request_dict, operation_model): + """Attempt to compress the request body using the modeled encodings.""" + if _should_compress_request(config, request_dict, operation_model): + for encoding in operation_model.request_compression['encodings']: + encoder = COMPRESSION_MAPPING.get(encoding) + if encoder is not None: + logger.debug('Compressing request with %s encoding.', encoding) + request_dict['body'] = encoder(request_dict['body']) + _set_compression_header(request_dict['headers'], encoding) + return + else: + logger.debug('Unsupported compression encoding: %s', encoding) + + +def _should_compress_request(config, request_dict, operation_model): + if ( + config.disable_request_compression is not True + and config.signature_version != 'v2' + and operation_model.request_compression is not None + ): + if not _is_compressible_type(request_dict): + body_type = type(request_dict['body']) + log_msg = 'Body type %s does not support compression.' + logger.debug(log_msg, body_type) + return False + + if operation_model.has_streaming_input: + streaming_input = operation_model.get_streaming_input() + streaming_metadata = streaming_input.metadata + return 'requiresLength' not in streaming_metadata + + body_size = _get_body_size(request_dict['body']) + min_size = config.request_min_compression_size_bytes + return min_size <= body_size + + return False + + +def _is_compressible_type(request_dict): + body = request_dict['body'] + # Coerce dict to a format compatible with compression. + if isinstance(body, dict): + body = urlencode(body, doseq=True, encoding='utf-8').encode('utf-8') + request_dict['body'] = body + is_supported_type = isinstance(body, (str, bytes, bytearray)) + return is_supported_type or hasattr(body, 'read') + + +def _get_body_size(body): + size = determine_content_length(body) + if size is None: + logger.debug( + 'Unable to get length of the request body: %s. 
'
+            'Skipping compression.',
+            body,
+        )
+        size = 0
+    return size
+
+
+def _gzip_compress_body(body):
+    register_feature_id('GZIP_REQUEST_COMPRESSION')
+    if isinstance(body, str):
+        return gzip_compress(body.encode('utf-8'))
+    elif isinstance(body, (bytes, bytearray)):
+        return gzip_compress(body)
+    elif hasattr(body, 'read'):
+        if hasattr(body, 'seek') and hasattr(body, 'tell'):
+            current_position = body.tell()
+            compressed_obj = _gzip_compress_fileobj(body)
+            body.seek(current_position)
+            return compressed_obj
+        return _gzip_compress_fileobj(body)
+
+
+def _gzip_compress_fileobj(body):
+    compressed_obj = io.BytesIO()
+    with GzipFile(fileobj=compressed_obj, mode='wb') as gz:
+        while True:
+            chunk = body.read(8192)
+            if not chunk:
+                break
+            if isinstance(chunk, str):
+                chunk = chunk.encode('utf-8')
+            gz.write(chunk)
+    compressed_obj.seek(0)
+    return compressed_obj
+
+
+def _set_compression_header(headers, encoding):
+    ce_header = headers.get('Content-Encoding')
+    if ce_header is None:
+        headers['Content-Encoding'] = encoding
+    else:
+        headers['Content-Encoding'] = f'{ce_header},{encoding}'
+
+
+COMPRESSION_MAPPING = {'gzip': _gzip_compress_body}
diff --git a/py311/lib/python3.11/site-packages/botocore/config.py b/py311/lib/python3.11/site-packages/botocore/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0ca5a71eb61dba9aacf71d856f4661d1575eaf2
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/config.py
@@ -0,0 +1,477 @@
+# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import copy
+
+from botocore.compat import OrderedDict
+from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
+from botocore.exceptions import (
+    InvalidMaxRetryAttemptsError,
+    InvalidRetryConfigurationError,
+    InvalidRetryModeError,
+    InvalidS3AddressingStyleError,
+)
+
+
+class Config:
+    """Advanced configuration for Botocore clients.
+
+    :type region_name: str
+    :param region_name: The region to use in instantiating the client
+
+    :type signature_version: str
+    :param signature_version: The signature version when signing requests.
+
+    :type user_agent: str
+    :param user_agent: The value to use in the User-Agent header.
+
+    :type user_agent_extra: str
+    :param user_agent_extra: The value to append to the current User-Agent
+        header value.
+
+    :type user_agent_appid: str
+    :param user_agent_appid: A value that gets included in the User-Agent
+        string in the format "app/<user_agent_appid>". Allowed characters are
+        ASCII alphanumerics and ``!#$%&'*+-.^_`|~``. All other characters will
+        be replaced by a ``-``.
+
+    :type connect_timeout: float or int
+    :param connect_timeout: The time in seconds till a timeout exception is
+        thrown when attempting to make a connection. The default is 60
+        seconds.
+
+    :type read_timeout: float or int
+    :param read_timeout: The time in seconds till a timeout exception is
+        thrown when attempting to read from a connection. The default is
+        60 seconds.
+ + :type parameter_validation: bool + :param parameter_validation: Whether parameter validation should occur + when serializing requests. The default is True. You can disable + parameter validation for performance reasons. Otherwise, it's + recommended to leave parameter validation enabled. + + :type max_pool_connections: int + :param max_pool_connections: The maximum number of connections to + keep in a connection pool. If this value is not set, the default + value of 10 is used. + + :type proxies: dict + :param proxies: A dictionary of proxy servers to use by protocol or + endpoint, e.g.: + ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. + The proxies are used on each request. + + :type proxies_config: dict + :param proxies_config: A dictionary of additional proxy configurations. + Valid keys are: + + * ``proxy_ca_bundle`` -- The path to a custom certificate bundle to use + when establishing SSL/TLS connections with proxy. + + * ``proxy_client_cert`` -- The path to a certificate for proxy + TLS client authentication. + + When a string is provided it is treated as a path to a proxy client + certificate. When a two element tuple is provided, it will be + interpreted as the path to the client certificate, and the path + to the certificate key. + + * ``proxy_use_forwarding_for_https`` -- For HTTPS proxies, + forward your requests to HTTPS destinations with an absolute + URI. We strongly recommend you only use this option with + trusted or corporate proxies. Value must be boolean. + + :type s3: dict + :param s3: A dictionary of S3 specific configurations. + Valid keys are: + + * ``use_accelerate_endpoint`` -- Refers to whether to use the S3 + Accelerate endpoint. The value must be a boolean. If True, the + client will use the S3 Accelerate endpoint. If the S3 Accelerate + endpoint is being used then the addressing style will always + be virtual. + + * ``payload_signing_enabled`` -- Refers to whether or not to SHA256 + sign SigV4 payloads. For operations that support request checksums, + this only applies when ``request_checksum_calculation`` is set to + ``when_required``. Otherwise, this is disabled for + streaming uploads (UploadPart and PutObject) by default. + + * ``addressing_style`` -- Refers to the style in which to address + s3 endpoints. Values must be a string that equals one of: + + * ``auto`` -- Addressing style is chosen for user. Depending + on the configuration of client, the endpoint may be addressed in + the virtual or the path style. Note that this is the default + behavior if no style is specified. + + * ``virtual`` -- Addressing style is always virtual. The name of the + bucket must be DNS compatible or an exception will be thrown. + Endpoints will be addressed as such: ``amzn-s3-demo-bucket.s3.amazonaws.com`` + + * ``path`` -- Addressing style is always by path. Endpoints will be + addressed as such: ``s3.amazonaws.com/amzn-s3-demo-bucket`` + + * ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use + when the region is configured to be us-east-1. Values must be a + string that equals: + + * ``regional`` -- Use the us-east-1.amazonaws.com endpoint if the + client is configured to use the us-east-1 region. + + * ``legacy`` -- Use the s3.amazonaws.com endpoint if the client is + configured to use the us-east-1 region. This is the default if + the configuration option is not specified. + + + :type retries: dict + :param retries: A dictionary for configuration related to retry behavior. 
+ Valid keys are: + + * ``total_max_attempts`` -- An integer representing the maximum number of + total attempts that will be made on a single request. This includes + the initial request, so a value of 1 indicates that no requests + will be retried. If ``total_max_attempts`` and ``max_attempts`` + are both provided, ``total_max_attempts`` takes precedence. + ``total_max_attempts`` is preferred over ``max_attempts`` because + it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and + the ``max_attempts`` config file value. + * ``max_attempts`` -- An integer representing the maximum number of + retry attempts that will be made on a single request. For + example, setting this value to 2 will result in the request + being retried at most two times after the initial request. Setting + this value to 0 will result in no retries ever being attempted after + the initial request. If not provided, the number of retries will + default to the value specified in the service model, which is + typically four retries. + * ``mode`` -- A string representing the type of retry mode botocore + should use. Valid values are: + + * ``legacy`` - The pre-existing retry behavior. + + * ``standard`` - The standardized set of retry rules. This will also + default to 3 max attempts unless overridden. + + * ``adaptive`` - Retries with additional client side throttling. + + :type client_cert: str, (str, str) + :param client_cert: The path to a certificate for TLS client authentication. + + When a string is provided it is treated as a path to a client + certificate to be used when creating a TLS connection. + + If a client key is to be provided alongside the client certificate the + client_cert should be set to a tuple of length two where the first + element is the path to the client certificate and the second element is + the path to the certificate key. + + :type inject_host_prefix: bool + :param inject_host_prefix: Whether host prefix injection should occur. + + Defaults to None. + + The default of None is equivalent to setting to True, which enables + the injection of operation parameters into the prefix of the hostname. + Setting this to False disables the injection of operation parameters + into the prefix of the hostname. Setting this to False is useful for + clients providing custom endpoints that should not have their host + prefix modified. + + :type use_dualstack_endpoint: bool + :param use_dualstack_endpoint: Setting to True enables dualstack + endpoint resolution. + + Defaults to None. + + :type use_fips_endpoint: bool + :param use_fips_endpoint: Setting to True enables fips + endpoint resolution. + + Defaults to None. + + :type ignore_configured_endpoint_urls: bool + :param ignore_configured_endpoint_urls: Setting to True disables use + of endpoint URLs provided via environment variables and + the shared configuration file. + + Defaults to None. + + :type tcp_keepalive: bool + :param tcp_keepalive: Enables the TCP Keep-Alive socket option used when + creating new connections if set to True. + + Defaults to False. + + :type request_min_compression_size_bytes: int + :param request_min_compression_size_bytes: The minimum size in bytes that a + request body should be to trigger compression. All requests with + streaming input that don't contain the ``requiresLength`` trait will be + compressed regardless of this setting. + + Defaults to None. + + :type disable_request_compression: bool + :param disable_request_compression: Disables request body compression if + set to True. + + Defaults to None. 
+ + :type sigv4a_signing_region_set: string + :param sigv4a_signing_region_set: A set of AWS regions to apply the signature for + when using SigV4a for signing. Set to ``*`` to represent all regions. + + Defaults to None. + + :type client_context_params: dict + :param client_context_params: A dictionary of parameters specific to + individual services. If available, valid parameters can be found in + the ``Client Context Parameters`` section of the service client's + documentation. Invalid parameters or ones that are not used by the + specified service will be ignored. + + Defaults to None. + + :type request_checksum_calculation: str + :param request_checksum_calculation: Determines when a checksum will be + calculated for request payloads. Valid values are: + + * ``when_supported`` -- When set, a checksum will be calculated for + all request payloads of operations modeled with the ``httpChecksum`` + trait where ``requestChecksumRequired`` is ``true`` or a + ``requestAlgorithmMember`` is modeled. + + * ``when_required`` -- When set, a checksum will only be calculated + for request payloads of operations modeled with the ``httpChecksum`` + trait where ``requestChecksumRequired`` is ``true`` or where a + ``requestAlgorithmMember`` is modeled and supplied. + + Defaults to None. + + :type response_checksum_validation: str + :param response_checksum_validation: Determines when checksum validation + will be performed on response payloads. Valid values are: + + * ``when_supported`` -- When set, checksum validation is performed on + all response payloads of operations modeled with the ``httpChecksum`` + trait where ``responseAlgorithms`` is modeled, except when no modeled + checksum algorithms are supported. + + * ``when_required`` -- When set, checksum validation is not performed + on response payloads of operations unless the checksum algorithm is + supported and the ``requestValidationModeMember`` member is set to ``ENABLED``. + + Defaults to None. + + :type account_id_endpoint_mode: str + :param account_id_endpoint_mode: The value used to determine the client's + behavior for account ID based endpoint routing. Valid values are: + + * ``preferred`` - The endpoint should include account ID if available. + * ``disabled`` - A resolved endpoint does not include account ID. + * ``required`` - The endpoint must include account ID. If the account ID + isn't available, an exception will be raised. + + If a value is not provided, the client will default to ``preferred``. + + Defaults to None. + + :type auth_scheme_preference: str + :param auth_scheme_preference: A comma-delimited string of case-sensitive + auth scheme names used to determine the client's auth scheme preference. + + Defaults to None. 
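+
+    A minimal construction sketch (all values below are illustrative, not
+    recommendations)::
+
+        from botocore.config import Config
+
+        config = Config(
+            region_name='us-west-2',
+            connect_timeout=5,
+            read_timeout=60,
+            retries={'max_attempts': 2, 'mode': 'standard'},
+        )
+
+    The resulting object is then passed when creating a client, e.g.
+    ``session.create_client('s3', config=config)``.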
+ """ + + OPTION_DEFAULTS = OrderedDict( + [ + ('region_name', None), + ('signature_version', None), + ('user_agent', None), + ('user_agent_extra', None), + ('user_agent_appid', None), + ('connect_timeout', DEFAULT_TIMEOUT), + ('read_timeout', DEFAULT_TIMEOUT), + ('parameter_validation', True), + ('max_pool_connections', MAX_POOL_CONNECTIONS), + ('proxies', None), + ('proxies_config', None), + ('s3', None), + ('retries', None), + ('client_cert', None), + ('inject_host_prefix', None), + ('endpoint_discovery_enabled', None), + ('use_dualstack_endpoint', None), + ('use_fips_endpoint', None), + ('ignore_configured_endpoint_urls', None), + ('defaults_mode', None), + ('tcp_keepalive', None), + ('request_min_compression_size_bytes', None), + ('disable_request_compression', None), + ('client_context_params', None), + ('sigv4a_signing_region_set', None), + ('request_checksum_calculation', None), + ('response_checksum_validation', None), + ('account_id_endpoint_mode', None), + ('auth_scheme_preference', None), + ] + ) + + NON_LEGACY_OPTION_DEFAULTS = { + 'connect_timeout': None, + } + + # The original default value of the inject_host_prefix parameter was True. + # This prevented the ability to override the value from other locations in + # the parameter provider chain, like env vars or the shared configuration + # file. TO accomplish this, we need to disambiguate when the value was set + # by the user or not. This overrides the parameter with a property so the + # default value of inject_host_prefix is still True if it is not set by the + # user. + @property + def inject_host_prefix(self): + if self._inject_host_prefix == "UNSET": + return True + + return self._inject_host_prefix + + # Override the setter for the case where the user does supply a value; + # _inject_host_prefix will no longer be "UNSET". + @inject_host_prefix.setter + def inject_host_prefix(self, value): + self._inject_host_prefix = value + + def __init__(self, *args, **kwargs): + self._user_provided_options = self._record_user_provided_options( + args, kwargs + ) + + # By default, we use a value that indicates the user did not + # set it. This value MUST persist on the Config object to be used + # elsewhere. + self._inject_host_prefix = 'UNSET' + + # Merge the user_provided options onto the default options + config_vars = copy.copy(self.OPTION_DEFAULTS) + defaults_mode = self._user_provided_options.get( + 'defaults_mode', 'legacy' + ) + if defaults_mode != 'legacy': + config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS) + + config_vars.update(self._user_provided_options) + + # Set the attributes based on the config_vars + for key, value in config_vars.items(): + # Default values for the Config object are set here. We don't want + # to use `setattr` in the case where the user already supplied a + # value. 
+ if ( + key == 'inject_host_prefix' + and 'inject_host_prefix' + not in self._user_provided_options.keys() + ): + continue + setattr(self, key, value) + + # Validate the s3 options + self._validate_s3_configuration(self.s3) + + self._validate_retry_configuration(self.retries) + + def _record_user_provided_options(self, args, kwargs): + option_order = list(self.OPTION_DEFAULTS) + user_provided_options = {} + + # Iterate through the kwargs passed through to the constructor and + # map valid keys to the dictionary + for key, value in kwargs.items(): + if key in self.OPTION_DEFAULTS: + user_provided_options[key] = value + # The key must exist in the available options + else: + raise TypeError(f"Got unexpected keyword argument '{key}'") + + # The number of args should not be longer than the allowed + # options + if len(args) > len(option_order): + raise TypeError( + f"Takes at most {len(option_order)} arguments ({len(args)} given)" + ) + + # Iterate through the args passed through to the constructor and map + # them to appropriate keys. + for i, arg in enumerate(args): + # If a kwarg was specified for the arg, then error out + if option_order[i] in user_provided_options: + raise TypeError( + f"Got multiple values for keyword argument '{option_order[i]}'" + ) + user_provided_options[option_order[i]] = arg + + return user_provided_options + + def _validate_s3_configuration(self, s3): + if s3 is not None: + addressing_style = s3.get('addressing_style') + if addressing_style not in ['virtual', 'auto', 'path', None]: + raise InvalidS3AddressingStyleError( + s3_addressing_style=addressing_style + ) + + def _validate_retry_configuration(self, retries): + valid_options = ('max_attempts', 'mode', 'total_max_attempts') + valid_modes = ('legacy', 'standard', 'adaptive') + if retries is not None: + for key, value in retries.items(): + if key not in valid_options: + raise InvalidRetryConfigurationError( + retry_config_option=key, + valid_options=valid_options, + ) + if key == 'max_attempts' and value < 0: + raise InvalidMaxRetryAttemptsError( + provided_max_attempts=value, + min_value=0, + ) + if key == 'total_max_attempts' and value < 1: + raise InvalidMaxRetryAttemptsError( + provided_max_attempts=value, + min_value=1, + ) + if key == 'mode' and value not in valid_modes: + raise InvalidRetryModeError( + provided_retry_mode=value, + valid_modes=valid_modes, + ) + + def merge(self, other_config): + """Merges the config object with another config object + + This will merge in all non-default values from the provided config + and return a new config object + + :type other_config: botocore.config.Config + :param other config: Another config object to merge with. The values + in the provided config object will take precedence in the merging + + :returns: A config object built from the merged values of both + config objects. + """ + # Make a copy of the current attributes in the config object. + config_options = copy.copy(self._user_provided_options) + + # Merge in the user provided options from the other config + config_options.update(other_config._user_provided_options) + + # Return a new config object with the merged properties. 
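+        # Illustration (hypothetical values): merging
+        #     Config(region_name='us-east-1', retries={'max_attempts': 3})
+        # with
+        #     Config(region_name='eu-west-1')
+        # yields region_name 'eu-west-1' while retries stay
+        # {'max_attempts': 3}, since the provided config wins on conflicts.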
+        return Config(**config_options)
diff --git a/py311/lib/python3.11/site-packages/botocore/configloader.py b/py311/lib/python3.11/site-packages/botocore/configloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b6c82bcad6061a82cb39926141734a03dc3bd8c
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/configloader.py
@@ -0,0 +1,287 @@
+# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
+# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import configparser
+import copy
+import os
+import shlex
+import sys
+
+import botocore.exceptions
+
+
+def multi_file_load_config(*filenames):
+    """Load and combine multiple INI configs with profiles.
+
+    This function will take a list of filenames and return
+    a single dictionary that represents the merging of the loaded
+    config files.
+
+    If any of the provided filenames does not exist, then that file
+    is ignored. It is therefore ok to provide a list of filenames,
+    some of which may not exist.
+
+    Configuration files are **not** deep merged, only the top level
+    keys are merged. The filenames should be passed in order of
+    precedence. The first config file has precedence over the
+    second config file, which has precedence over the third config file,
+    etc. The only exception to this is that the "profiles" key is
+    merged to combine profiles from multiple config files into a
+    single profiles mapping. However, if a profile is defined in
+    multiple config files, then the config file with the highest
+    precedence is used. Profile values themselves are not merged.
+    For example::
+
+        FileA              FileB              FileC
+        [foo]              [foo]              [bar]
+        a=1                a=2                a=3
+        b=2
+
+        [bar]              [baz]              [profile a]
+        a=2                a=3                region=e
+
+        [profile a]        [profile b]        [profile c]
+        region=c           region=d           region=f
+
+    The final result of ``multi_file_load_config(FileA, FileB, FileC)``
+    would be::
+
+        {"foo": {"a": 1, "b": 2}, "bar": {"a": 2}, "baz": {"a": 3},
+         "profiles": {"a": {"region": "c"},
+                      "b": {"region": "d"},
+                      "c": {"region": "f"}}}
+
+    Note that the "foo" key comes from FileA, even though it's defined in
+    both FileA and FileB. Because "foo" was defined in FileA first, the
+    values for "foo" from FileA are used and the values for "foo" from
+    FileB are ignored. Also note where the profiles originate from.
+    Profile "a" comes from FileA, profile "b" comes from FileB, and
+    profile "c" comes from FileC.
+ + """ + configs = [] + profiles = [] + for filename in filenames: + try: + loaded = load_config(filename) + except botocore.exceptions.ConfigNotFound: + continue + profiles.append(loaded.pop('profiles')) + configs.append(loaded) + merged_config = _merge_list_of_dicts(configs) + merged_profiles = _merge_list_of_dicts(profiles) + merged_config['profiles'] = merged_profiles + return merged_config + + +def _merge_list_of_dicts(list_of_dicts): + merged_dicts = {} + for single_dict in list_of_dicts: + for key, value in single_dict.items(): + if key not in merged_dicts: + merged_dicts[key] = value + return merged_dicts + + +def load_config(config_filename): + """Parse a INI config with profiles. + + This will parse an INI config file and map top level profiles + into a top level "profile" key. + + If you want to parse an INI file and map all section names to + top level keys, use ``raw_config_parse`` instead. + + """ + parsed = raw_config_parse(config_filename) + return build_profile_map(parsed) + + +def raw_config_parse(config_filename, parse_subsections=True): + """Returns the parsed INI config contents. + + Each section name is a top level key. + + :param config_filename: The name of the INI file to parse + + :param parse_subsections: If True, parse indented blocks as + subsections that represent their own configuration dictionary. + For example, if the config file had the contents:: + + s3 = + signature_version = s3v4 + addressing_style = path + + The resulting ``raw_config_parse`` would be:: + + {'s3': {'signature_version': 's3v4', 'addressing_style': 'path'}} + + If False, do not try to parse subsections and return the indented + block as its literal value:: + + {'s3': '\nsignature_version = s3v4\naddressing_style = path'} + + :returns: A dict with keys for each profile found in the config + file and the value of each key being a dict containing name + value pairs found in that profile. + + :raises: ConfigNotFound, ConfigParseError + """ + config = {} + path = config_filename + if path is not None: + path = os.path.expandvars(path) + path = os.path.expanduser(path) + if not os.path.isfile(path): + raise botocore.exceptions.ConfigNotFound(path=_unicode_path(path)) + cp = configparser.RawConfigParser() + try: + cp.read([path]) + except (configparser.Error, UnicodeDecodeError) as e: + raise botocore.exceptions.ConfigParseError( + path=_unicode_path(path), error=e + ) from None + else: + for section in cp.sections(): + config[section] = {} + for option in cp.options(section): + config_value = cp.get(section, option) + if parse_subsections and config_value.startswith('\n'): + # Then we need to parse the inner contents as + # hierarchical. We support a single level + # of nesting for now. + try: + config_value = _parse_nested(config_value) + except ValueError as e: + raise botocore.exceptions.ConfigParseError( + path=_unicode_path(path), error=e + ) from None + config[section][option] = config_value + return config + + +def _unicode_path(path): + if isinstance(path, str): + return path + # According to the documentation getfilesystemencoding can return None + # on unix in which case the default encoding is used instead. 
+ filesystem_encoding = sys.getfilesystemencoding() + if filesystem_encoding is None: + filesystem_encoding = sys.getdefaultencoding() + return path.decode(filesystem_encoding, 'replace') + + +def _parse_nested(config_value): + # Given a value like this: + # \n + # foo = bar + # bar = baz + # We need to parse this into + # {'foo': 'bar', 'bar': 'baz} + parsed = {} + for line in config_value.splitlines(): + line = line.strip() + if not line: + continue + # The caller will catch ValueError + # and raise an appropriate error + # if this fails. + key, value = line.split('=', 1) + parsed[key.strip()] = value.strip() + return parsed + + +def _parse_section(key, values): + result = {} + try: + parts = shlex.split(key) + except ValueError: + return result + if len(parts) == 2: + result[parts[1]] = values + return result + + +def build_profile_map(parsed_ini_config): + """Convert the parsed INI config into a profile map. + + The config file format requires that every profile except the + default to be prepended with "profile", e.g.:: + + [profile test] + aws_... = foo + aws_... = bar + + [profile bar] + aws_... = foo + aws_... = bar + + # This is *not* a profile + [preview] + otherstuff = 1 + + # Neither is this + [foobar] + morestuff = 2 + + The build_profile_map will take a parsed INI config file where each top + level key represents a section name, and convert into a format where all + the profiles are under a single top level "profiles" key, and each key in + the sub dictionary is a profile name. For example, the above config file + would be converted from:: + + {"profile test": {"aws_...": "foo", "aws...": "bar"}, + "profile bar": {"aws...": "foo", "aws...": "bar"}, + "preview": {"otherstuff": ...}, + "foobar": {"morestuff": ...}, + } + + into:: + + {"profiles": {"test": {"aws_...": "foo", "aws...": "bar"}, + "bar": {"aws...": "foo", "aws...": "bar"}, + "preview": {"otherstuff": ...}, + "foobar": {"morestuff": ...}, + } + + If there are no profiles in the provided parsed INI contents, then + an empty dict will be the value associated with the ``profiles`` key. + + .. note:: + + This will not mutate the passed in parsed_ini_config. Instead it will + make a deepcopy and return that value. + + """ + parsed_config = copy.deepcopy(parsed_ini_config) + profiles = {} + sso_sessions = {} + services = {} + final_config = {} + for key, values in parsed_config.items(): + if key.startswith("profile"): + profiles.update(_parse_section(key, values)) + elif key.startswith("sso-session"): + sso_sessions.update(_parse_section(key, values)) + elif key.startswith("services"): + services.update(_parse_section(key, values)) + elif key == 'default': + # default section is special and is considered a profile + # name but we don't require you use 'profile "default"' + # as a section. + profiles[key] = values + else: + final_config[key] = values + final_config['profiles'] = profiles + final_config['sso_sessions'] = sso_sessions + final_config['services'] = services + return final_config diff --git a/py311/lib/python3.11/site-packages/botocore/configprovider.py b/py311/lib/python3.11/site-packages/botocore/configprovider.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a9c5b757e475d71a27466cc3a1ed895709ab09 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/configprovider.py @@ -0,0 +1,1051 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). 
You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""This module contains the interface for controlling how configuration +is loaded. +""" + +import copy +import logging +import os + +from botocore import utils +from botocore.exceptions import InvalidConfigError + +logger = logging.getLogger(__name__) + + +#: A default dictionary that maps the logical names for session variables +#: to the specific environment variables and configuration file names +#: that contain the values for these variables. +#: When creating a new Session object, you can pass in your own dictionary +#: to remap the logical names or to add new logical names. You can then +#: get the current value for these variables by using the +#: ``get_config_variable`` method of the :class:`botocore.session.Session` +#: class. +#: These form the keys of the dictionary. The values in the dictionary +#: are tuples of (, , , +#: ). +#: The conversion func is a function that takes the configuration value +#: as an argument and returns the converted value. If this value is +#: None, then the configuration value is returned unmodified. This +#: conversion function can be used to type convert config values to +#: values other than the default values of strings. +#: The ``profile`` and ``config_file`` variables should always have a +#: None value for the first entry in the tuple because it doesn't make +#: sense to look inside the config file for the location of the config +#: file or for the default profile to use. +#: The ``config_name`` is the name to look for in the configuration file, +#: the ``env var`` is the OS environment variable (``os.environ``) to +#: use, and ``default_value`` is the value to use if no value is otherwise +#: found. +#: NOTE: Fixing the spelling of this variable would be a breaking change. +#: Please leave as is. +BOTOCORE_DEFAUT_SESSION_VARIABLES = { + # logical: config_file, env_var, default_value, conversion_func + 'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None), + 'region': ('region', 'AWS_DEFAULT_REGION', None, None), + 'data_path': ('data_path', 'AWS_DATA_PATH', None, None), + 'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None), + 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None), + 'api_versions': ('api_versions', None, {}, None), + # This is the shared credentials file amongst sdks. + 'credentials_file': ( + None, + 'AWS_SHARED_CREDENTIALS_FILE', + '~/.aws/credentials', + None, + ), + # These variables only exist in the config file. + # This is the number of seconds until we time out a request to + # the instance metadata service. + 'metadata_service_timeout': ( + 'metadata_service_timeout', + 'AWS_METADATA_SERVICE_TIMEOUT', + 1, + int, + ), + # This is the number of request attempts we make until we give + # up trying to retrieve data from the instance metadata service. 
+ 'metadata_service_num_attempts': ( + 'metadata_service_num_attempts', + 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', + 1, + int, + ), + 'ec2_metadata_service_endpoint': ( + 'ec2_metadata_service_endpoint', + 'AWS_EC2_METADATA_SERVICE_ENDPOINT', + None, + None, + ), + 'ec2_metadata_service_endpoint_mode': ( + 'ec2_metadata_service_endpoint_mode', + 'AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE', + None, + None, + ), + 'ec2_metadata_v1_disabled': ( + 'ec2_metadata_v1_disabled', + 'AWS_EC2_METADATA_V1_DISABLED', + False, + utils.ensure_boolean, + ), + 'imds_use_ipv6': ( + 'imds_use_ipv6', + 'AWS_IMDS_USE_IPV6', + False, + utils.ensure_boolean, + ), + 'use_dualstack_endpoint': ( + 'use_dualstack_endpoint', + 'AWS_USE_DUALSTACK_ENDPOINT', + None, + utils.ensure_boolean, + ), + 'use_fips_endpoint': ( + 'use_fips_endpoint', + 'AWS_USE_FIPS_ENDPOINT', + None, + utils.ensure_boolean, + ), + 'ignore_configured_endpoint_urls': ( + 'ignore_configured_endpoint_urls', + 'AWS_IGNORE_CONFIGURED_ENDPOINT_URLS', + None, + utils.ensure_boolean, + ), + 'parameter_validation': ('parameter_validation', None, True, None), + # Client side monitoring configurations. + # Note: These configurations are considered internal to botocore. + # Do not use them until publicly documented. + 'csm_enabled': ( + 'csm_enabled', + 'AWS_CSM_ENABLED', + False, + utils.ensure_boolean, + ), + 'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None), + 'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int), + 'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None), + # Endpoint discovery configuration + 'endpoint_discovery_enabled': ( + 'endpoint_discovery_enabled', + 'AWS_ENDPOINT_DISCOVERY_ENABLED', + 'auto', + None, + ), + 'sts_regional_endpoints': ( + 'sts_regional_endpoints', + 'AWS_STS_REGIONAL_ENDPOINTS', + 'regional', + None, + ), + 'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None), + 'defaults_mode': ('defaults_mode', 'AWS_DEFAULTS_MODE', 'legacy', None), + # We can't have a default here for v1 because we need to defer to + # whatever the defaults are in _retry.json. + 'max_attempts': ('max_attempts', 'AWS_MAX_ATTEMPTS', None, int), + 'user_agent_appid': ('sdk_ua_app_id', 'AWS_SDK_UA_APP_ID', None, None), + 'request_min_compression_size_bytes': ( + 'request_min_compression_size_bytes', + 'AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES', + 10240, + None, + ), + 'disable_request_compression': ( + 'disable_request_compression', + 'AWS_DISABLE_REQUEST_COMPRESSION', + False, + utils.ensure_boolean, + ), + 'sigv4a_signing_region_set': ( + 'sigv4a_signing_region_set', + 'AWS_SIGV4A_SIGNING_REGION_SET', + None, + None, + ), + 'request_checksum_calculation': ( + 'request_checksum_calculation', + 'AWS_REQUEST_CHECKSUM_CALCULATION', + "when_supported", + None, + ), + 'response_checksum_validation': ( + 'response_checksum_validation', + 'AWS_RESPONSE_CHECKSUM_VALIDATION', + "when_supported", + None, + ), + 'account_id_endpoint_mode': ( + 'account_id_endpoint_mode', + 'AWS_ACCOUNT_ID_ENDPOINT_MODE', + 'preferred', + None, + ), + 'disable_host_prefix_injection': ( + 'disable_host_prefix_injection', + 'AWS_DISABLE_HOST_PREFIX_INJECTION', + None, + utils.ensure_boolean, + ), + 'auth_scheme_preference': ( + 'auth_scheme_preference', + 'AWS_AUTH_SCHEME_PREFERENCE', + None, + None, + ), +} +# A mapping for the s3 specific configuration vars. These are the configuration +# vars that typically go in the s3 section of the config file. This mapping +# follows the same schema as the previous session variable mapping. 
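+# For example, the 'addressing_style' entry below is read from the nested
+# ('s3', 'addressing_style') key of the config file, has no dedicated
+# environment variable or default value, and applies no conversion function.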
+DEFAULT_S3_CONFIG_VARS = { + 'addressing_style': (('s3', 'addressing_style'), None, None, None), + 'use_accelerate_endpoint': ( + ('s3', 'use_accelerate_endpoint'), + None, + None, + utils.ensure_boolean, + ), + 'use_dualstack_endpoint': ( + ('s3', 'use_dualstack_endpoint'), + None, + None, + utils.ensure_boolean, + ), + 'payload_signing_enabled': ( + ('s3', 'payload_signing_enabled'), + None, + None, + utils.ensure_boolean, + ), + 'use_arn_region': ( + ['s3_use_arn_region', ('s3', 'use_arn_region')], + 'AWS_S3_USE_ARN_REGION', + None, + utils.ensure_boolean, + ), + 'us_east_1_regional_endpoint': ( + [ + 's3_us_east_1_regional_endpoint', + ('s3', 'us_east_1_regional_endpoint'), + ], + 'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', + None, + None, + ), + 's3_disable_multiregion_access_points': ( + ('s3', 's3_disable_multiregion_access_points'), + 'AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS', + None, + utils.ensure_boolean, + ), +} +# A mapping for the proxy specific configuration vars. These are +# used to configure how botocore interacts with proxy setups while +# sending requests. +DEFAULT_PROXIES_CONFIG_VARS = { + 'proxy_ca_bundle': ('proxy_ca_bundle', None, None, None), + 'proxy_client_cert': ('proxy_client_cert', None, None, None), + 'proxy_use_forwarding_for_https': ( + 'proxy_use_forwarding_for_https', + None, + None, + utils.normalize_boolean, + ), +} + + +def create_botocore_default_config_mapping(session): + chain_builder = ConfigChainFactory(session=session) + config_mapping = _create_config_chain_mapping( + chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES + ) + config_mapping['s3'] = SectionConfigProvider( + 's3', + session, + _create_config_chain_mapping(chain_builder, DEFAULT_S3_CONFIG_VARS), + ) + config_mapping['proxies_config'] = SectionConfigProvider( + 'proxies_config', + session, + _create_config_chain_mapping( + chain_builder, DEFAULT_PROXIES_CONFIG_VARS + ), + ) + return config_mapping + + +def _create_config_chain_mapping(chain_builder, config_variables): + mapping = {} + for logical_name, config in config_variables.items(): + mapping[logical_name] = chain_builder.create_config_chain( + instance_name=logical_name, + env_var_names=config[1], + config_property_names=config[0], + default=config[2], + conversion_func=config[3], + ) + return mapping + + +class DefaultConfigResolver: + def __init__(self, default_config_data): + self._base_default_config = default_config_data['base'] + self._modes = default_config_data['modes'] + self._resolved_default_configurations = {} + + def _resolve_default_values_by_mode(self, mode): + default_config = self._base_default_config.copy() + modifications = self._modes.get(mode) + + for config_var in modifications: + default_value = default_config[config_var] + modification_dict = modifications[config_var] + modification = list(modification_dict.keys())[0] + modification_value = modification_dict[modification] + if modification == 'multiply': + default_value *= modification_value + elif modification == 'add': + default_value += modification_value + elif modification == 'override': + default_value = modification_value + default_config[config_var] = default_value + return default_config + + def get_default_modes(self): + default_modes = ['legacy', 'auto'] + default_modes.extend(self._modes.keys()) + return default_modes + + def get_default_config_values(self, mode): + if mode not in self._resolved_default_configurations: + defaults = self._resolve_default_values_by_mode(mode) + self._resolved_default_configurations[mode] = defaults + return 
self._resolved_default_configurations[mode]
+
+
+class ConfigChainFactory:
+    """Factory class to create our most common configuration chain case.
+
+    This is a convenience class to construct configuration chains that follow
+    our most common pattern. This is to prevent ordering them incorrectly,
+    and to make the config chain construction more readable.
+    """
+
+    def __init__(self, session, environ=None):
+        """Initialize a ConfigChainFactory.
+
+        :type session: :class:`botocore.session.Session`
+        :param session: This is the session that should be used to look up
+            values from the config file.
+
+        :type environ: dict
+        :param environ: A mapping to use for environment variables. If this
+            is not provided it will default to use os.environ.
+        """
+        self._session = session
+        if environ is None:
+            environ = os.environ
+        self._environ = environ
+
+    def create_config_chain(
+        self,
+        instance_name=None,
+        env_var_names=None,
+        config_property_names=None,
+        default=None,
+        conversion_func=None,
+    ):
+        """Build a config chain following the standard botocore pattern.
+
+        In botocore most of our config chains follow the precedence:
+        session_instance_variables, environment, config_file, default_value.
+
+        This is a convenience function for creating a chain that follows
+        that precedence.
+
+        :type instance_name: str
+        :param instance_name: This indicates what session instance variable
+            corresponds to this config value. If it is None it will not be
+            added to the chain.
+
+        :type env_var_names: str or list of str or None
+        :param env_var_names: One or more environment variable names to
+            search for this value. They are searched in order. If it is None
+            it will not be added to the chain.
+
+        :type config_property_names: str/tuple or list of str/tuple or None
+        :param config_property_names: One or more strings or tuples
+            representing the name of the key in the config file for this
+            config option. They are searched in order. If it is None it will
+            not be added to the chain.
+
+        :type default: Any
+        :param default: Any constant value to be returned.
+
+        :type conversion_func: None or callable
+        :param conversion_func: If this value is None then it has no effect on
+            the return type. Otherwise, it is treated as a function that will
+            be used to convert the provided value.
+
+        :rtype: ChainProvider
+        :returns: A ChainProvider that resolves in the order instance_name ->
+            env_var_names -> config_property_names -> default. Any providers
+            for values that were None are omitted from the chain.
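+
+        A usage sketch (the chain built here is illustrative; the real
+        mappings live in BOTOCORE_DEFAUT_SESSION_VARIABLES)::
+
+            chain_builder = ConfigChainFactory(session=session)
+            chain = chain_builder.create_config_chain(
+                instance_name='region',
+                env_var_names='AWS_DEFAULT_REGION',
+                config_property_names='region',
+            )
+            region = chain.provide()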
+ """ + providers = [] + if instance_name is not None: + providers.append( + InstanceVarProvider( + instance_var=instance_name, session=self._session + ) + ) + if env_var_names is not None: + providers.extend(self._get_env_providers(env_var_names)) + if config_property_names is not None: + providers.extend( + self._get_scoped_config_providers(config_property_names) + ) + if default is not None: + providers.append(ConstantProvider(value=default)) + + return ChainProvider( + providers=providers, + conversion_func=conversion_func, + ) + + def _get_env_providers(self, env_var_names): + env_var_providers = [] + if not isinstance(env_var_names, list): + env_var_names = [env_var_names] + for env_var_name in env_var_names: + env_var_providers.append( + EnvironmentProvider(name=env_var_name, env=self._environ) + ) + return env_var_providers + + def _get_scoped_config_providers(self, config_property_names): + scoped_config_providers = [] + if not isinstance(config_property_names, list): + config_property_names = [config_property_names] + for config_property_name in config_property_names: + scoped_config_providers.append( + ScopedConfigProvider( + config_var_name=config_property_name, + session=self._session, + ) + ) + return scoped_config_providers + + +class ConfigValueStore: + """The ConfigValueStore object stores configuration values.""" + + def __init__(self, mapping=None): + """Initialize a ConfigValueStore. + + :type mapping: dict + :param mapping: The mapping parameter is a map of string to a subclass + of BaseProvider. When a config variable is asked for via the + get_config_variable method, the corresponding provider will be + invoked to load the value. + """ + self._overrides = {} + self._mapping = {} + if mapping is not None: + for logical_name, provider in mapping.items(): + self.set_config_provider(logical_name, provider) + + def __deepcopy__(self, memo): + config_store = ConfigValueStore(copy.deepcopy(self._mapping, memo)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store + + def __copy__(self): + config_store = ConfigValueStore(copy.copy(self._mapping)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store + + def get_config_variable(self, logical_name): + """ + Retrieve the value associated with the specified logical_name + from the corresponding provider. If no value is found None will + be returned. + + :type logical_name: str + :param logical_name: The logical name of the session variable + you want to retrieve. This name will be mapped to the + appropriate environment variable name for this session as + well as the appropriate config file entry. + + :returns: value of variable or None if not defined. + """ + if logical_name in self._overrides: + return self._overrides[logical_name] + if logical_name not in self._mapping: + return None + provider = self._mapping[logical_name] + return provider.provide() + + def get_config_provider(self, logical_name): + """ + Retrieve the provider associated with the specified logical_name. + If no provider is found None will be returned. + + :type logical_name: str + :param logical_name: The logical name of the session variable + you want to retrieve. This name will be mapped to the + appropriate environment variable name for this session as + well as the appropriate config file entry. + + :returns: configuration provider or None if not defined. 
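+
+        Note that an override installed via ``set_config_variable`` masks
+        the underlying provider, so this method returns None in that case
+        (``store`` below stands in for a ConfigValueStore instance)::
+
+            >>> store.set_config_variable('region', 'us-west-2')
+            >>> store.get_config_provider('region') is None
+            True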
+ """ + if ( + logical_name in self._overrides + or logical_name not in self._mapping + ): + return None + provider = self._mapping[logical_name] + return provider + + def set_config_variable(self, logical_name, value): + """Set a configuration variable to a specific value. + + By using this method, you can override the normal lookup + process used in ``get_config_variable`` by explicitly setting + a value. Subsequent calls to ``get_config_variable`` will + use the ``value``. This gives you per-session specific + configuration values. + + :: + >>> # Assume logical name 'foo' maps to env var 'FOO' + >>> os.environ['FOO'] = 'myvalue' + >>> s.get_config_variable('foo') + 'myvalue' + >>> s.set_config_variable('foo', 'othervalue') + >>> s.get_config_variable('foo') + 'othervalue' + + :type logical_name: str + :param logical_name: The logical name of the session variable + you want to set. These are the keys in ``SESSION_VARIABLES``. + + :param value: The value to associate with the config variable. + """ + self._overrides[logical_name] = value + + def clear_config_variable(self, logical_name): + """Remove an override config variable from the session. + + :type logical_name: str + :param logical_name: The name of the parameter to clear the override + value from. + """ + self._overrides.pop(logical_name, None) + + def set_config_provider(self, logical_name, provider): + """Set the provider for a config value. + + This provides control over how a particular configuration value is + loaded. This replaces the provider for ``logical_name`` with the new + ``provider``. + + :type logical_name: str + :param logical_name: The name of the config value to change the config + provider for. + + :type provider: :class:`botocore.configprovider.BaseProvider` + :param provider: The new provider that should be responsible for + providing a value for the config named ``logical_name``. + """ + self._mapping[logical_name] = provider + + +class SmartDefaultsConfigStoreFactory: + def __init__(self, default_config_resolver, imds_region_provider): + self._default_config_resolver = default_config_resolver + self._imds_region_provider = imds_region_provider + # Initializing _instance_metadata_region as None so we + # can fetch region in a lazy fashion only when needed. 
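
# A small usage sketch for ConfigValueStore, assuming botocore.configprovider
# is importable; the logical name 'retry_mode' and the env dict are just
# example data.
from botocore.configprovider import ConfigValueStore, EnvironmentProvider

store = ConfigValueStore(
    mapping={
        'retry_mode': EnvironmentProvider(
            name='AWS_RETRY_MODE', env={'AWS_RETRY_MODE': 'standard'}
        )
    }
)
print(store.get_config_variable('retry_mode'))  # 'standard' (from the provider)
store.set_config_variable('retry_mode', 'adaptive')
print(store.get_config_variable('retry_mode'))  # 'adaptive' (override wins)
store.clear_config_variable('retry_mode')
print(store.get_config_variable('retry_mode'))  # back to 'standard'
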
+        self._instance_metadata_region = None
+
+    def merge_smart_defaults(self, config_store, mode, region_name):
+        if mode == 'auto':
+            mode = self.resolve_auto_mode(region_name)
+        default_configs = (
+            self._default_config_resolver.get_default_config_values(mode)
+        )
+        for config_var in default_configs:
+            config_value = default_configs[config_var]
+            method = getattr(self, f'_set_{config_var}', None)
+            if method:
+                method(config_store, config_value)
+
+    def resolve_auto_mode(self, region_name):
+        current_region = None
+        if os.environ.get('AWS_EXECUTION_ENV'):
+            default_region = os.environ.get('AWS_DEFAULT_REGION')
+            current_region = os.environ.get('AWS_REGION', default_region)
+        if not current_region:
+            if self._instance_metadata_region:
+                current_region = self._instance_metadata_region
+            else:
+                try:
+                    current_region = self._imds_region_provider.provide()
+                    self._instance_metadata_region = current_region
+                except Exception:
+                    pass
+
+        if current_region:
+            if region_name == current_region:
+                return 'in-region'
+            else:
+                return 'cross-region'
+        return 'standard'
+
+    def _update_provider(self, config_store, variable, value):
+        original_provider = config_store.get_config_provider(variable)
+        default_provider = ConstantProvider(value)
+        if isinstance(original_provider, ChainProvider):
+            chain_provider_copy = copy.deepcopy(original_provider)
+            chain_provider_copy.set_default_provider(default_provider)
+            default_provider = chain_provider_copy
+        elif isinstance(original_provider, BaseProvider):
+            default_provider = ChainProvider(
+                providers=[original_provider, default_provider]
+            )
+        config_store.set_config_provider(variable, default_provider)
+
+    def _update_section_provider(
+        self, config_store, section_name, variable, value
+    ):
+        section_provider_copy = copy.deepcopy(
+            config_store.get_config_provider(section_name)
+        )
+        section_provider_copy.set_default_provider(
+            variable, ConstantProvider(value)
+        )
+        config_store.set_config_provider(section_name, section_provider_copy)
+
+    def _set_retryMode(self, config_store, value):
+        self._update_provider(config_store, 'retry_mode', value)
+
+    def _set_stsRegionalEndpoints(self, config_store, value):
+        self._update_provider(config_store, 'sts_regional_endpoints', value)
+
+    def _set_s3UsEast1RegionalEndpoints(self, config_store, value):
+        self._update_section_provider(
+            config_store, 's3', 'us_east_1_regional_endpoint', value
+        )
+
+    def _set_connectTimeoutInMillis(self, config_store, value):
+        self._update_provider(config_store, 'connect_timeout', value / 1000)
+
+
+class BaseProvider:
+    """Base class for configuration value providers.
+
+    A configuration provider has some method of providing a configuration
+    value.
+    """
+
+    def provide(self):
+        """Provide a config value."""
+        raise NotImplementedError('provide')
+
+
+class ChainProvider(BaseProvider):
+    """This provider wraps one or more other providers.
+
+    Each provider in the chain is called; the first one to return a non-None
+    value is returned.
+    """
+
+    def __init__(self, providers=None, conversion_func=None):
+        """Initialize a ChainProvider.
+
+        :type providers: list
+        :param providers: The initial list of providers to check for values
+            when invoked.
+
+        :type conversion_func: None or callable
+        :param conversion_func: If this value is None then it has no effect on
+            the return type. Otherwise, it is treated as a function that will
+            transform the provided value.
+ """ + if providers is None: + providers = [] + self._providers = providers + self._conversion_func = conversion_func + + def __deepcopy__(self, memo): + return ChainProvider( + copy.deepcopy(self._providers, memo), self._conversion_func + ) + + def provide(self): + """Provide the value from the first provider to return non-None. + + Each provider in the chain has its provide method called. The first + one in the chain to return a non-None value is the returned from the + ChainProvider. When no non-None value is found, None is returned. + """ + for provider in self._providers: + value = provider.provide() + if value is not None: + return self._convert_type(value) + return None + + def set_default_provider(self, default_provider): + if self._providers and isinstance( + self._providers[-1], ConstantProvider + ): + self._providers[-1] = default_provider + else: + self._providers.append(default_provider) + + num_of_constants = sum( + isinstance(provider, ConstantProvider) + for provider in self._providers + ) + if num_of_constants > 1: + logger.info( + 'ChainProvider object contains multiple ' + 'instances of ConstantProvider objects' + ) + + def _convert_type(self, value): + if self._conversion_func is not None: + return self._conversion_func(value) + return value + + def __repr__(self): + return '[{}]'.format(', '.join([str(p) for p in self._providers])) + + +class InstanceVarProvider(BaseProvider): + """This class loads config values from the session instance vars.""" + + def __init__(self, instance_var, session): + """Initialize InstanceVarProvider. + + :type instance_var: str + :param instance_var: The instance variable to load from the session. + + :type session: :class:`botocore.session.Session` + :param session: The botocore session to get the loaded configuration + file variables from. + """ + self._instance_var = instance_var + self._session = session + + def __deepcopy__(self, memo): + return InstanceVarProvider( + copy.deepcopy(self._instance_var, memo), self._session + ) + + def provide(self): + """Provide a config value from the session instance vars.""" + instance_vars = self._session.instance_variables() + value = instance_vars.get(self._instance_var) + return value + + def __repr__(self): + return f'InstanceVarProvider(instance_var={self._instance_var}, session={self._session})' + + +class ScopedConfigProvider(BaseProvider): + def __init__(self, config_var_name, session): + """Initialize ScopedConfigProvider. + + :type config_var_name: str or tuple + :param config_var_name: The name of the config variable to load from + the configuration file. If the value is a tuple, it must only + consist of two items, where the first item represents the section + and the second item represents the config var name in the section. + + :type session: :class:`botocore.session.Session` + :param session: The botocore session to get the loaded configuration + file variables from. 
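
# A sketch of how set_default_provider behaves, which is what
# _update_provider above relies on: a trailing ConstantProvider is replaced,
# anything else gets the new default appended. All values are invented.
from botocore.configprovider import ChainProvider, ConstantProvider

chain = ChainProvider(providers=[ConstantProvider('legacy')])
chain.set_default_provider(ConstantProvider('standard'))
print(chain.provide())  # 'standard' -- the old trailing constant was replaced

# conversion_func is applied to whatever the chain yields:
chain = ChainProvider(providers=[ConstantProvider('1000')], conversion_func=int)
print(chain.provide())  # 1000, as an int
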
+ """ + self._config_var_name = config_var_name + self._session = session + + def __deepcopy__(self, memo): + return ScopedConfigProvider( + copy.deepcopy(self._config_var_name, memo), self._session + ) + + def provide(self): + """Provide a value from a config file property.""" + scoped_config = self._session.get_scoped_config() + if isinstance(self._config_var_name, tuple): + section_config = scoped_config.get(self._config_var_name[0]) + if not isinstance(section_config, dict): + return None + return section_config.get(self._config_var_name[1]) + return scoped_config.get(self._config_var_name) + + def __repr__(self): + return f'ScopedConfigProvider(config_var_name={self._config_var_name}, session={self._session})' + + +class EnvironmentProvider(BaseProvider): + """This class loads config values from environment variables.""" + + def __init__(self, name, env): + """Initialize with the keys in the dictionary to check. + + :type name: str + :param name: The key with that name will be loaded and returned. + + :type env: dict + :param env: Environment variables dictionary to get variables from. + """ + self._name = name + self._env = env + + def __deepcopy__(self, memo): + return EnvironmentProvider( + copy.deepcopy(self._name, memo), copy.deepcopy(self._env, memo) + ) + + def provide(self): + """Provide a config value from a source dictionary.""" + if self._name in self._env: + return self._env[self._name] + return None + + def __repr__(self): + return f'EnvironmentProvider(name={self._name}, env={self._env})' + + +class SectionConfigProvider(BaseProvider): + """Provides a dictionary from a section in the scoped config + + This is useful for retrieving scoped config variables (i.e. s3) that have + their own set of config variables and resolving logic. 
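
# A sketch of the tuple form of config_var_name accepted above, using a
# stand-in session object; the section names and values are invented.
from botocore.configprovider import ScopedConfigProvider

class _FakeSession:
    def get_scoped_config(self):
        return {'region': 'us-west-2', 's3': {'addressing_style': 'path'}}

print(ScopedConfigProvider('region', _FakeSession()).provide())
# 'us-west-2'
print(ScopedConfigProvider(('s3', 'addressing_style'), _FakeSession()).provide())
# 'path'
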
+ """ + + def __init__(self, section_name, session, override_providers=None): + self._section_name = section_name + self._session = session + self._scoped_config_provider = ScopedConfigProvider( + self._section_name, self._session + ) + self._override_providers = override_providers + if self._override_providers is None: + self._override_providers = {} + + def __deepcopy__(self, memo): + return SectionConfigProvider( + copy.deepcopy(self._section_name, memo), + self._session, + copy.deepcopy(self._override_providers, memo), + ) + + def provide(self): + section_config = self._scoped_config_provider.provide() + if section_config and not isinstance(section_config, dict): + logger.debug( + "The %s config key is not a dictionary type, " + "ignoring its value of: %s", + self._section_name, + section_config, + ) + return None + for section_config_var, provider in self._override_providers.items(): + provider_val = provider.provide() + if provider_val is not None: + if section_config is None: + section_config = {} + section_config[section_config_var] = provider_val + return section_config + + def set_default_provider(self, key, default_provider): + provider = self._override_providers.get(key) + if isinstance(provider, ChainProvider): + provider.set_default_provider(default_provider) + return + elif isinstance(provider, BaseProvider): + default_provider = ChainProvider( + providers=[provider, default_provider] + ) + self._override_providers[key] = default_provider + + def __repr__(self): + return ( + f'SectionConfigProvider(section_name={self._section_name}, ' + f'session={self._session}, ' + f'override_providers={self._override_providers})' + ) + + +class ConstantProvider(BaseProvider): + """This provider provides a constant value.""" + + def __init__(self, value): + self._value = value + + def __deepcopy__(self, memo): + return ConstantProvider(copy.deepcopy(self._value, memo)) + + def provide(self): + """Provide the constant value given during initialization.""" + return self._value + + def __repr__(self): + return f'ConstantProvider(value={self._value})' + + +class ConfiguredEndpointProvider(BaseProvider): + """Lookup an endpoint URL from environment variable or shared config file. + + NOTE: This class is considered private and is subject to abrupt breaking + changes or removal without prior announcement. Please do not use it + directly. + """ + + _ENDPOINT_URL_LOOKUP_ORDER = [ + 'environment_service', + 'environment_global', + 'config_service', + 'config_global', + ] + + def __init__( + self, + full_config, + scoped_config, + client_name, + environ=None, + ): + """Initialize a ConfiguredEndpointProviderChain. + + :type full_config: dict + :param full_config: This is the dict representing the full + configuration file. + + :type scoped_config: dict + :param scoped_config: This is the dict representing the configuration + for the current profile for the session. + + :type client_name: str + :param client_name: The name used to instantiate a client using + botocore.session.Session.create_client. + + :type environ: dict + :param environ: A mapping to use for environment variables. If this + is not provided it will default to use os.environ. + """ + self._full_config = full_config + self._scoped_config = scoped_config + self._client_name = client_name + self._transformed_service_id = self._get_snake_case_service_id( + self._client_name + ) + if environ is None: + environ = os.environ + self._environ = environ + + def provide(self): + """Lookup the configured endpoint URL. + + The order is: + + 1. 
The value provided by a service-specific environment variable. + 2. The value provided by the global endpoint environment variable + (AWS_ENDPOINT_URL). + 3. The value provided by a service-specific parameter from a services + definition section in the shared configuration file. + 4. The value provided by the global parameter from a services + definition section in the shared configuration file. + """ + for location in self._ENDPOINT_URL_LOOKUP_ORDER: + logger.debug( + 'Looking for endpoint for %s via: %s', + self._client_name, + location, + ) + + endpoint_url = getattr(self, f'_get_endpoint_url_{location}')() + + if endpoint_url: + logger.info( + 'Found endpoint for %s via: %s.', + self._client_name, + location, + ) + return endpoint_url + + logger.debug('No configured endpoint found.') + return None + + def _get_snake_case_service_id(self, client_name): + # Get the service ID without loading the service data file, accounting + # for any aliases and standardizing the names with hyphens. + client_name = utils.SERVICE_NAME_ALIASES.get(client_name, client_name) + hyphenized_service_id = ( + utils.CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES.get( + client_name, client_name + ) + ) + return hyphenized_service_id.replace('-', '_') + + def _get_service_env_var_name(self): + transformed_service_id_env = self._transformed_service_id.upper() + return f'AWS_ENDPOINT_URL_{transformed_service_id_env}' + + def _get_services_config(self): + if 'services' not in self._scoped_config: + return {} + + section_name = self._scoped_config['services'] + services_section = self._full_config.get('services', {}).get( + section_name + ) + + if not services_section: + error_msg = ( + f'The profile is configured to use the services ' + f'section but the "{section_name}" services ' + f'configuration does not exist.' + ) + raise InvalidConfigError(error_msg=error_msg) + + return services_section + + def _get_endpoint_url_config_service(self): + snakecase_service_id = self._transformed_service_id.lower() + return ( + self._get_services_config() + .get(snakecase_service_id, {}) + .get('endpoint_url') + ) + + def _get_endpoint_url_config_global(self): + return self._scoped_config.get('endpoint_url') + + def _get_endpoint_url_environment_service(self): + return EnvironmentProvider( + name=self._get_service_env_var_name(), env=self._environ + ).provide() + + def _get_endpoint_url_environment_global(self): + return EnvironmentProvider( + name='AWS_ENDPOINT_URL', env=self._environ + ).provide() diff --git a/py311/lib/python3.11/site-packages/botocore/context.py b/py311/lib/python3.11/site-packages/botocore/context.py new file mode 100644 index 0000000000000000000000000000000000000000..8034747729c1a48acea02bd7b430544b5c98c812 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/context.py @@ -0,0 +1,127 @@ +# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +""" +NOTE: All classes and functions in this module are considered private and are +subject to abrupt breaking changes. 
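
# Referring back to ConfiguredEndpointProvider above: a sketch of its lookup
# order with invented config data and endpoints. The service-specific
# environment variable (derived as AWS_ENDPOINT_URL_S3 for the s3 client)
# wins over both config-file sources.
from botocore.configprovider import ConfiguredEndpointProvider

provider = ConfiguredEndpointProvider(
    full_config={
        'services': {'local': {'s3': {'endpoint_url': 'http://localhost:4566'}}}
    },
    scoped_config={'services': 'local'},
    client_name='s3',
    environ={'AWS_ENDPOINT_URL_S3': 'http://127.0.0.1:9000'},
)
print(provider.provide())  # 'http://127.0.0.1:9000' (environment_service)
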
Please do not use them directly. +""" + +from contextlib import contextmanager +from contextvars import ContextVar +from copy import deepcopy +from dataclasses import dataclass, field +from functools import wraps + + +@dataclass +class ClientContext: + """ + Encapsulation of objects tracked within the ``_context`` context variable. + + ``features`` is a set responsible for storing features used during + preparation of an AWS request. ``botocore.useragent.register_feature_id`` + is used to add to this set. + """ + + features: set[str] = field(default_factory=set) + + +_context = ContextVar("_context") + + +def get_context(): + """Get the current ``_context`` context variable if set, else None.""" + return _context.get(None) + + +def set_context(ctx): + """Set the current ``_context`` context variable. + + :type ctx: ClientContext + :param ctx: Client context object to set as the current context variable. + + :rtype: contextvars.Token + :returns: Token object used to revert the context variable to what it was + before the corresponding set. + """ + token = _context.set(ctx) + return token + + +def reset_context(token): + """Reset the current ``_context`` context variable. + + :type token: contextvars.Token + :param token: Token object to reset the context variable. + """ + _context.reset(token) + + +@contextmanager +def start_as_current_context(ctx=None): + """ + Context manager that copies the passed or current context object and sets + it as the current context variable. If no context is found, a new + ``ClientContext`` object is created. It mainly ensures the context variable + is reset to the previous value once the executed code returns. + + Example usage: + + def my_feature(): + with start_as_current_context(): + register_feature_id('MY_FEATURE') + pass + + :type ctx: ClientContext + :param ctx: The client context object to set as the new context variable. + If not provided, the current or a new context variable is used. + """ + current = ctx or get_context() + if current is None: + new = ClientContext() + else: + new = deepcopy(current) + token = set_context(new) + try: + yield + finally: + reset_context(token) + + +def with_current_context(hook=None): + """ + Decorator that wraps ``start_as_current_context`` and optionally invokes a + hook within the newly-set context. This is just syntactic sugar to avoid + indenting existing code under the context manager. + + Example usage: + + @with_current_context(partial(register_feature_id, 'MY_FEATURE')) + def my_feature(): + pass + + :type hook: callable + :param hook: A callable that will be invoked within the scope of the + ``start_as_current_context`` context manager. + """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + with start_as_current_context(): + if hook: + hook() + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/py311/lib/python3.11/site-packages/botocore/credentials.py b/py311/lib/python3.11/site-packages/botocore/credentials.py new file mode 100644 index 0000000000000000000000000000000000000000..571dfeac6a5cd695d76394068a3c069d7863e547 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/credentials.py @@ -0,0 +1,2781 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. 
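
# A usage sketch for the context helpers above; the feature id
# 'EXAMPLE_FEATURE' is a placeholder (real callers go through
# botocore.useragent.register_feature_id), and this assumes no ambient
# context is already set.
from botocore.context import get_context, start_as_current_context

with start_as_current_context():
    get_context().features.add('EXAMPLE_FEATURE')
    print(get_context().features)  # {'EXAMPLE_FEATURE'}
print(get_context())  # None -- the previous (unset) context is restored
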
A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import base64 +import datetime +import getpass +import json +import logging +import os +import subprocess +import threading +import time +import uuid +from collections import namedtuple +from copy import deepcopy +from hashlib import sha1, sha256 + +import dateutil.parser +from dateutil.parser import parse +from dateutil.tz import tzlocal, tzutc + +import botocore.compat +import botocore.configloader +from botocore import UNSIGNED +from botocore.compat import ( + EC, + compat_shell_split, + total_seconds, +) +from botocore.config import Config +from botocore.exceptions import ( + ConfigNotFound, + CredentialRetrievalError, + InfiniteLoopConfigError, + InvalidConfigError, + LoginError, + LoginInsufficientPermissions, + LoginRefreshRequired, + LoginTokenLoadError, + MetadataRetrievalError, + MissingDependencyException, + PartialCredentialsError, + RefreshWithMFAUnsupportedError, + UnauthorizedSSOTokenError, + UnknownCredentialError, +) +from botocore.tokens import SSOTokenProvider +from botocore.useragent import register_feature_id, register_feature_ids +from botocore.utils import ( + ArnParser, + ContainerMetadataFetcher, + FileWebIdentityTokenLoader, + InstanceMetadataFetcher, + JSONFileCache, + LoginTokenLoader, + SSOTokenLoader, + create_nested_client, + get_login_token_cache_directory, + parse_key_val_file, + resolve_imds_endpoint_mode, +) + +logger = logging.getLogger(__name__) +ReadOnlyCredentials = namedtuple( + 'ReadOnlyCredentials', + ['access_key', 'secret_key', 'token', 'account_id'], + defaults=(None,), +) + +_DEFAULT_MANDATORY_REFRESH_TIMEOUT = 10 * 60 # 10 min +_DEFAULT_ADVISORY_REFRESH_TIMEOUT = 15 * 60 # 15 min + + +def create_credential_resolver(session, cache=None, region_name=None): + """Create a default credential resolver. + + This creates a pre-configured credential resolver + that includes the default lookup chain for + credentials. 
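
# A tiny illustration of the ReadOnlyCredentials namedtuple defined above:
# defaults=(None,) applies to the trailing account_id field. The key
# material here is fake.
from botocore.credentials import ReadOnlyCredentials

frozen = ReadOnlyCredentials('AKIAEXAMPLE', 'wJalrEXAMPLEKEY', 'tokenEXAMPLE')
print(frozen.account_id)  # None -- account_id was omitted, the default applies
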
+
+    """
+    profile_name = session.get_config_variable('profile') or 'default'
+    metadata_timeout = session.get_config_variable('metadata_service_timeout')
+    num_attempts = session.get_config_variable('metadata_service_num_attempts')
+    disable_env_vars = session.instance_variables().get('profile') is not None
+
+    imds_config = {
+        'ec2_metadata_service_endpoint': session.get_config_variable(
+            'ec2_metadata_service_endpoint'
+        ),
+        'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode(
+            session
+        ),
+        'ec2_credential_refresh_window': _DEFAULT_ADVISORY_REFRESH_TIMEOUT,
+        'ec2_metadata_v1_disabled': session.get_config_variable(
+            'ec2_metadata_v1_disabled'
+        ),
+    }
+
+    if cache is None:
+        cache = {}
+
+    env_provider = EnvProvider()
+    container_provider = ContainerProvider()
+    instance_metadata_provider = InstanceMetadataProvider(
+        iam_role_fetcher=InstanceMetadataFetcher(
+            timeout=metadata_timeout,
+            num_attempts=num_attempts,
+            user_agent=session.user_agent(),
+            config=imds_config,
+        )
+    )
+
+    profile_provider_builder = ProfileProviderBuilder(
+        session, cache=cache, region_name=region_name
+    )
+    assume_role_provider = AssumeRoleProvider(
+        load_config=lambda: session.full_config,
+        client_creator=_get_client_creator(session, region_name),
+        cache=cache,
+        profile_name=profile_name,
+        credential_sourcer=CanonicalNameCredentialSourcer(
+            [env_provider, container_provider, instance_metadata_provider]
+        ),
+        profile_provider_builder=profile_provider_builder,
+    )
+
+    pre_profile = [
+        env_provider,
+        assume_role_provider,
+    ]
+    profile_providers = profile_provider_builder.providers(
+        profile_name=profile_name,
+        disable_env_vars=disable_env_vars,
+    )
+    post_profile = [
+        OriginalEC2Provider(),
+        BotoProvider(),
+        container_provider,
+        instance_metadata_provider,
+    ]
+    providers = pre_profile + profile_providers + post_profile
+
+    if disable_env_vars:
+        # An explicitly provided profile will negate an EnvProvider.
+        # We will defer to providers that understand the "profile"
+        # concept to retrieve credentials.
+        # The one edge case is if all three values are provided via
+        # env vars:
+        # export AWS_ACCESS_KEY_ID=foo
+        # export AWS_SECRET_ACCESS_KEY=bar
+        # export AWS_PROFILE=baz
+        # Then, just like our client() calls, the explicit credentials
+        # will take precedence.
+        #
+        # In that case the profile is not a session instance variable, so
+        # this branch is not taken and the EnvProvider is left in the chain,
+        # which enforces that precedence. This means that the only way a
+        # "profile" would win is if the EnvProvider does not return
+        # credentials, which is what we want in this scenario.
+        providers.remove(env_provider)
+        logger.debug(
+            'Skipping environment variable credential check'
+            ' because profile name was explicitly set.'
+        )
+
+    resolver = CredentialResolver(providers=providers)
+    return resolver
+
+
+class ProfileProviderBuilder:
+    """This class handles the creation of profile based providers.
+
+    NOTE: This class is only intended for internal use.
+
+    This class handles the creation and ordering of the various credential
+    providers that primarily source their configuration from the shared
+    config. This is needed to enable sharing between the default credential
+    chain and the source profile chain created by the assume role provider.
+ """ + + def __init__( + self, + session, + cache=None, + region_name=None, + sso_token_cache=None, + login_token_cache=None, + ): + self._session = session + self._cache = cache + self._region_name = region_name + self._sso_token_cache = sso_token_cache + self._login_token_cache = login_token_cache + + def providers(self, profile_name, disable_env_vars=False): + return [ + self._create_web_identity_provider( + profile_name, + disable_env_vars, + ), + self._create_sso_provider(profile_name), + self._create_shared_credential_provider(profile_name), + self._create_login_provider(profile_name), + self._create_process_provider(profile_name), + self._create_config_provider(profile_name), + ] + + def _create_process_provider(self, profile_name): + return ProcessProvider( + profile_name=profile_name, + load_config=lambda: self._session.full_config, + ) + + def _create_shared_credential_provider(self, profile_name): + credential_file = self._session.get_config_variable('credentials_file') + return SharedCredentialProvider( + profile_name=profile_name, + creds_filename=credential_file, + ) + + def _create_config_provider(self, profile_name): + config_file = self._session.get_config_variable('config_file') + return ConfigProvider( + profile_name=profile_name, + config_filename=config_file, + ) + + def _create_web_identity_provider(self, profile_name, disable_env_vars): + return AssumeRoleWithWebIdentityProvider( + load_config=lambda: self._session.full_config, + client_creator=_get_client_creator( + self._session, self._region_name + ), + cache=self._cache, + profile_name=profile_name, + disable_env_vars=disable_env_vars, + ) + + def _create_sso_provider(self, profile_name): + return SSOProvider( + load_config=lambda: self._session.full_config, + client_creator=self._session.create_client, + profile_name=profile_name, + cache=self._cache, + token_cache=self._sso_token_cache, + token_provider=SSOTokenProvider( + self._session, + cache=self._sso_token_cache, + profile_name=profile_name, + ), + ) + + def _create_login_provider(self, profile_name): + return LoginProvider( + load_config=lambda: self._session.full_config, + client_creator=self._session.create_client, + profile_name=profile_name, + token_cache=self._login_token_cache, + ) + + +def get_credentials(session): + resolver = create_credential_resolver(session) + return resolver.load_credentials() + + +def _local_now(): + return datetime.datetime.now(tzlocal()) + + +def _parse_if_needed(value): + if isinstance(value, datetime.datetime): + return value + return parse(value) + + +def _serialize_if_needed(value, iso=False): + if isinstance(value, datetime.datetime): + if iso: + return value.isoformat() + return value.strftime('%Y-%m-%dT%H:%M:%S%Z') + return value + + +def _get_client_creator(session, region_name): + def client_creator(service_name, **kwargs): + create_client_kwargs = {'region_name': region_name} + create_client_kwargs.update(**kwargs) + return create_nested_client( + session, service_name, **create_client_kwargs + ) + + return client_creator + + +def create_assume_role_refresher(client, params): + def refresh(): + response = client.assume_role(**params) + credentials = response['Credentials'] + # We need to normalize the credential names to + # the values expected by the refresh creds. 
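
# A sketch of the refresher being built here, driven by a stub STS client so
# it runs offline; every credential value below is fake.
import datetime

from botocore.credentials import create_assume_role_refresher

class _StubSTS:
    def assume_role(self, **params):
        return {
            'Credentials': {
                'AccessKeyId': 'AKIAEXAMPLE',
                'SecretAccessKey': 'wJalrEXAMPLEKEY',
                'SessionToken': 'tokenEXAMPLE',
                'Expiration': datetime.datetime(2030, 1, 1),
            }
        }

refresh = create_assume_role_refresher(
    _StubSTS(), {'RoleArn': 'arn:aws:iam::123456789012:role/Example'}
)
print(refresh())  # keys normalized to access_key/secret_key/token/expiry_time
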
+ return { + 'access_key': credentials['AccessKeyId'], + 'secret_key': credentials['SecretAccessKey'], + 'token': credentials['SessionToken'], + 'expiry_time': _serialize_if_needed(credentials['Expiration']), + } + + return refresh + + +def create_mfa_serial_refresher(actual_refresh): + class _Refresher: + def __init__(self, refresh): + self._refresh = refresh + self._has_been_called = False + + def __call__(self): + if self._has_been_called: + # We can explore an option in the future to support + # reprompting for MFA, but for now we just error out + # when the temp creds expire. + raise RefreshWithMFAUnsupportedError() + self._has_been_called = True + return self._refresh() + + return _Refresher(actual_refresh) + + +class Credentials: + """ + Holds the credentials needed to authenticate requests. + + :param str access_key: The access key part of the credentials. + :param str secret_key: The secret key part of the credentials. + :param str token: The security token, valid only for session credentials. + :param str method: A string which identifies where the credentials + were found. + :param str account_id: (optional) An account ID associated with the credentials. + """ + + def __init__( + self, access_key, secret_key, token=None, method=None, account_id=None + ): + self.access_key = access_key + self.secret_key = secret_key + self.token = token + + if method is None: + method = 'explicit' + self.method = method + self.account_id = account_id + + self._normalize() + + def _normalize(self): + # Keys would sometimes (accidentally) contain non-ascii characters. + # It would cause a confusing UnicodeDecodeError in Python 2. + # We explicitly convert them into unicode to avoid such error. + # + # Eventually the service will decide whether to accept the credential. + # This also complies with the behavior in Python 3. + self.access_key = botocore.compat.ensure_unicode(self.access_key) + self.secret_key = botocore.compat.ensure_unicode(self.secret_key) + + def get_frozen_credentials(self): + return ReadOnlyCredentials( + self.access_key, self.secret_key, self.token, self.account_id + ) + + def get_deferred_property(self, property_name): + def get_property(): + return getattr(self, property_name, None) + + return get_property + + +class RefreshableCredentials(Credentials): + """ + Holds the credentials needed to authenticate requests. In addition, it + knows how to refresh itself. + + :param str access_key: The access key part of the credentials. + :param str secret_key: The secret key part of the credentials. + :param str token: The security token, valid only for session credentials. + :param datetime expiry_time: The expiration time of the credentials. + :param function refresh_using: Callback function to refresh the credentials. + :param str method: A string which identifies where the credentials + were found. + :param function time_fetcher: Callback function to retrieve current time. + """ + + # The time at which we'll attempt to refresh, but not + # block if someone else is refreshing. + _advisory_refresh_timeout = _DEFAULT_ADVISORY_REFRESH_TIMEOUT + # The time at which all threads will block waiting for + # refreshed credentials. 
+ _mandatory_refresh_timeout = _DEFAULT_MANDATORY_REFRESH_TIMEOUT + + def __init__( + self, + access_key, + secret_key, + token, + expiry_time, + refresh_using, + method, + time_fetcher=_local_now, + advisory_timeout=None, + mandatory_timeout=None, + account_id=None, + ): + self._refresh_using = refresh_using + self._access_key = access_key + self._secret_key = secret_key + self._token = token + self._account_id = account_id + self._expiry_time = expiry_time + self._time_fetcher = time_fetcher + self._refresh_lock = threading.Lock() + self.method = method + self._frozen_credentials = ReadOnlyCredentials( + access_key, secret_key, token, account_id + ) + self._normalize() + if advisory_timeout is not None: + self._advisory_refresh_timeout = advisory_timeout + if mandatory_timeout is not None: + self._mandatory_refresh_timeout = mandatory_timeout + + def _normalize(self): + self._access_key = botocore.compat.ensure_unicode(self._access_key) + self._secret_key = botocore.compat.ensure_unicode(self._secret_key) + + @classmethod + def create_from_metadata( + cls, + metadata, + refresh_using, + method, + advisory_timeout=None, + mandatory_timeout=None, + ): + kwargs = {} + if advisory_timeout is not None: + kwargs['advisory_timeout'] = advisory_timeout + if mandatory_timeout is not None: + kwargs['mandatory_timeout'] = mandatory_timeout + + instance = cls( + access_key=metadata['access_key'], + secret_key=metadata['secret_key'], + token=metadata['token'], + expiry_time=cls._expiry_datetime(metadata['expiry_time']), + method=method, + refresh_using=refresh_using, + account_id=metadata.get('account_id'), + **kwargs, + ) + return instance + + @property + def access_key(self): + """Warning: Using this property can lead to race conditions if you + access another property subsequently along the refresh boundary. + Please use get_frozen_credentials instead. + """ + self._refresh() + return self._access_key + + @access_key.setter + def access_key(self, value): + self._access_key = value + + @property + def secret_key(self): + """Warning: Using this property can lead to race conditions if you + access another property subsequently along the refresh boundary. + Please use get_frozen_credentials instead. + """ + self._refresh() + return self._secret_key + + @secret_key.setter + def secret_key(self, value): + self._secret_key = value + + @property + def token(self): + """Warning: Using this property can lead to race conditions if you + access another property subsequently along the refresh boundary. + Please use get_frozen_credentials instead. + """ + self._refresh() + return self._token + + @token.setter + def token(self, value): + self._token = value + + @property + def account_id(self): + """Warning: Using this property can lead to race conditions if you + access another property subsequently along the refresh boundary. + Please use get_frozen_credentials instead. + """ + self._refresh() + return self._account_id + + @account_id.setter + def account_id(self, value): + self._account_id = value + + def _seconds_remaining(self): + delta = self._expiry_time - self._time_fetcher() + return total_seconds(delta) + + def refresh_needed(self, refresh_in=None): + """Check if a refresh is needed. + + A refresh is needed if the expiry time associated + with the temporary credentials is less than the + provided ``refresh_in``. If ``time_delta`` is not + provided, ``self.advisory_refresh_needed`` will be used. 
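
# A sketch of the advisory (15 min) vs. mandatory (10 min) refresh windows
# described here, pinned to a fixed clock so the result is deterministic;
# all credential values are fake and 'sketch' is a made-up method name.
import datetime

from dateutil.tz import tzlocal

from botocore.credentials import RefreshableCredentials

now = datetime.datetime.now(tzlocal())
creds = RefreshableCredentials(
    'AKIAEXAMPLE', 'wJalrEXAMPLEKEY', 'tokenEXAMPLE',
    expiry_time=now + datetime.timedelta(minutes=12),
    refresh_using=lambda: None,
    method='sketch',
    time_fetcher=lambda: now,
)
print(creds.refresh_needed())                # True: 12 min < 15 min advisory
print(creds.refresh_needed(refresh_in=600))  # False: 12 min > 10 min mandatory
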
+ + For example, if your temporary credentials expire + in 10 minutes and the provided ``refresh_in`` is + ``15 * 60``, then this function will return ``True``. + + :type refresh_in: int + :param refresh_in: The number of seconds before the + credentials expire in which refresh attempts should + be made. + + :return: True if refresh needed, False otherwise. + + """ + if self._expiry_time is None: + # No expiration, so assume we don't need to refresh. + return False + + if refresh_in is None: + refresh_in = self._advisory_refresh_timeout + # The credentials should be refreshed if they're going to expire + # in less than 5 minutes. + if self._seconds_remaining() >= refresh_in: + # There's enough time left. Don't refresh. + return False + logger.debug("Credentials need to be refreshed.") + return True + + def _is_expired(self): + # Checks if the current credentials are expired. + return self.refresh_needed(refresh_in=0) + + def _refresh(self): + # In the common case where we don't need a refresh, we + # can immediately exit and not require acquiring the + # refresh lock. + if not self.refresh_needed(self._advisory_refresh_timeout): + return + + # acquire() doesn't accept kwargs, but False is indicating + # that we should not block if we can't acquire the lock. + # If we aren't able to acquire the lock, we'll trigger + # the else clause. + if self._refresh_lock.acquire(False): + try: + if not self.refresh_needed(self._advisory_refresh_timeout): + return + is_mandatory_refresh = self.refresh_needed( + self._mandatory_refresh_timeout + ) + self._protected_refresh(is_mandatory=is_mandatory_refresh) + return + finally: + self._refresh_lock.release() + elif self.refresh_needed(self._mandatory_refresh_timeout): + # If we're within the mandatory refresh window, + # we must block until we get refreshed credentials. + with self._refresh_lock: + if not self.refresh_needed(self._mandatory_refresh_timeout): + return + self._protected_refresh(is_mandatory=True) + + def _protected_refresh(self, is_mandatory): + # precondition: this method should only be called if you've acquired + # the self._refresh_lock. + try: + metadata = self._refresh_using() + except Exception: + period_name = 'mandatory' if is_mandatory else 'advisory' + logger.warning( + "Refreshing temporary credentials failed " + "during %s refresh period.", + period_name, + exc_info=True, + ) + if is_mandatory: + # If this is a mandatory refresh, then + # all errors that occur when we attempt to refresh + # credentials are propagated back to the user. + raise + # Otherwise we'll just return. + # The end result will be that we'll use the current + # set of temporary credentials we have. + return + self._set_from_data(metadata) + self._frozen_credentials = ReadOnlyCredentials( + self._access_key, self._secret_key, self._token, self._account_id + ) + if self._is_expired(): + # We successfully refreshed credentials but for whatever + # reason, our refreshing function returned credentials + # that are still expired. In this scenario, the only + # thing we can do is let the user know and raise + # an exception. + msg = ( + "Credentials were refreshed, but the " + "refreshed credentials are still expired." 
+ ) + logger.warning(msg) + raise RuntimeError(msg) + + @staticmethod + def _expiry_datetime(time_str): + return parse(time_str) + + def _set_from_data(self, data): + expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time'] + if not data: + missing_keys = expected_keys + else: + missing_keys = [k for k in expected_keys if k not in data] + + if missing_keys: + message = "Credential refresh failed, response did not contain: %s" + raise CredentialRetrievalError( + provider=self.method, + error_msg=message % ', '.join(missing_keys), + ) + + self.access_key = data['access_key'] + self.secret_key = data['secret_key'] + self.token = data['token'] + self._expiry_time = parse(data['expiry_time']) + self.account_id = data.get('account_id') + logger.debug( + "Retrieved credentials will expire at: %s", self._expiry_time + ) + self._normalize() + + def get_frozen_credentials(self): + """Return immutable credentials. + + The ``access_key``, ``secret_key``, and ``token`` properties + on this class will always check and refresh credentials if + needed before returning the particular credentials. + + This has an edge case where you can get inconsistent + credentials. Imagine this: + + # Current creds are "t1" + tmp.access_key ---> expired? no, so return t1.access_key + # ---- time is now expired, creds need refreshing to "t2" ---- + tmp.secret_key ---> expired? yes, refresh and return t2.secret_key + + This means we're using the access key from t1 with the secret key + from t2. To fix this issue, you can request a frozen credential object + which is guaranteed not to change. + + The frozen credentials returned from this method should be used + immediately and then discarded. The typical usage pattern would + be:: + + creds = RefreshableCredentials(...) + some_code = SomeSignerObject() + # I'm about to sign the request. + # The frozen credentials are only used for the + # duration of generate_presigned_url and will be + # immediately thrown away. + request = some_code.sign_some_request( + with_credentials=creds.get_frozen_credentials()) + print("Signed request:", request) + + """ + self._refresh() + return self._frozen_credentials + + +class DeferredRefreshableCredentials(RefreshableCredentials): + """Refreshable credentials that don't require initial credentials. + + refresh_using will be called upon first access. + """ + + def __init__(self, refresh_using, method, time_fetcher=_local_now): + self._refresh_using = refresh_using + self._access_key = None + self._secret_key = None + self._token = None + self._account_id = None + self._expiry_time = None + self._time_fetcher = time_fetcher + self._refresh_lock = threading.Lock() + self.method = method + self._frozen_credentials = None + + def refresh_needed(self, refresh_in=None): + if self._frozen_credentials is None: + return True + return super().refresh_needed(refresh_in) + + +class CachedCredentialFetcher: + DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15 + + def __init__(self, cache=None, expiry_window_seconds=None): + if cache is None: + cache = {} + self._cache = cache + self._cache_key = self._create_cache_key() + if expiry_window_seconds is None: + expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS + self._expiry_window_seconds = expiry_window_seconds + self.feature_ids = set() + + def _create_cache_key(self): + raise NotImplementedError('_create_cache_key()') + + def _make_file_safe(self, filename): + # Replace :, path sep, and / to make it the string filename safe. 
+ filename = filename.replace(':', '_').replace(os.sep, '_') + return filename.replace('/', '_') + + def _get_credentials(self): + raise NotImplementedError('_get_credentials()') + + def fetch_credentials(self): + return self._get_cached_credentials() + + def _get_cached_credentials(self): + """Get up-to-date credentials. + + This will check the cache for up-to-date credentials, calling assume + role if none are available. + """ + response = self._load_from_cache() + if response is None: + response = self._get_credentials() + self._write_to_cache(response) + else: + logger.debug("Credentials for role retrieved from cache.") + + creds = response['Credentials'] + expiration = _serialize_if_needed(creds['Expiration'], iso=True) + credentials = { + 'access_key': creds['AccessKeyId'], + 'secret_key': creds['SecretAccessKey'], + 'token': creds['SessionToken'], + 'expiry_time': expiration, + 'account_id': creds.get('AccountId'), + } + + return credentials + + def _load_from_cache(self): + if self._cache_key in self._cache: + creds = deepcopy(self._cache[self._cache_key]) + if not self._is_expired(creds): + return creds + else: + logger.debug( + "Credentials were found in cache, but they are expired." + ) + return None + + def _write_to_cache(self, response): + self._cache[self._cache_key] = deepcopy(response) + + def _is_expired(self, credentials): + """Check if credentials are expired.""" + end_time = _parse_if_needed(credentials['Credentials']['Expiration']) + seconds = total_seconds(end_time - _local_now()) + return seconds < self._expiry_window_seconds + + +class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher): + def __init__( + self, + client_creator, + role_arn, + extra_args=None, + cache=None, + expiry_window_seconds=None, + ): + self._client_creator = client_creator + self._role_arn = role_arn + + if extra_args is None: + self._assume_kwargs = {} + else: + self._assume_kwargs = deepcopy(extra_args) + self._assume_kwargs['RoleArn'] = self._role_arn + + self._role_session_name = self._assume_kwargs.get('RoleSessionName') + self._using_default_session_name = False + if not self._role_session_name: + self._generate_assume_role_name() + + super().__init__(cache, expiry_window_seconds) + + def _generate_assume_role_name(self): + self._role_session_name = f'botocore-session-{int(time.time())}' + self._assume_kwargs['RoleSessionName'] = self._role_session_name + self._using_default_session_name = True + + def _create_cache_key(self): + """Create a predictable cache key for the current configuration. + + The cache key is intended to be compatible with file names. + """ + args = deepcopy(self._assume_kwargs) + + # The role session name gets randomly generated, so we don't want it + # in the hash. + if self._using_default_session_name: + del args['RoleSessionName'] + + if 'Policy' in args: + # To have a predictable hash, the keys of the policy must be + # sorted, so we have to load it here to make sure it gets sorted + # later on. 
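
# The cache-key recipe used by _create_cache_key above, reproduced
# standalone: a canonical sorted-key JSON rendering of the assume-role
# arguments, hashed with sha1. The role ARN is fabricated.
import json
from hashlib import sha1

args = {
    'RoleArn': 'arn:aws:iam::123456789012:role/Example',
    'DurationSeconds': 3600,
}
canonical = json.dumps(args, sort_keys=True)
print(sha1(canonical.encode('utf-8')).hexdigest())
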
+ args['Policy'] = json.loads(args['Policy']) + + args = json.dumps(args, sort_keys=True) + argument_hash = sha1(args.encode('utf-8')).hexdigest() + return self._make_file_safe(argument_hash) + + def _add_account_id_to_response(self, response): + role_arn = response.get('AssumedRoleUser', {}).get('Arn') + if ArnParser.is_arn(role_arn): + arn_parser = ArnParser() + account_id = arn_parser.parse_arn(role_arn)['account'] + response['Credentials']['AccountId'] = account_id + else: + logger.debug("Unable to extract account ID from Arn: %s", role_arn) + + +class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher): + def __init__( + self, + client_creator, + source_credentials, + role_arn, + extra_args=None, + mfa_prompter=None, + cache=None, + expiry_window_seconds=None, + ): + """ + :type client_creator: callable + :param client_creator: A callable that creates a client taking + arguments like ``Session.create_client``. + + :type source_credentials: Credentials + :param source_credentials: The credentials to use to create the + client for the call to AssumeRole. + + :type role_arn: str + :param role_arn: The ARN of the role to be assumed. + + :type extra_args: dict + :param extra_args: Any additional arguments to add to the assume + role request using the format of the botocore operation. + Possible keys include, but may not be limited to, + DurationSeconds, Policy, SerialNumber, ExternalId and + RoleSessionName. + + :type mfa_prompter: callable + :param mfa_prompter: A callable that returns input provided by the + user (i.e raw_input, getpass.getpass, etc.). + + :type cache: dict + :param cache: An object that supports ``__getitem__``, + ``__setitem__``, and ``__contains__``. An example of this is + the ``JSONFileCache`` class in aws-cli. + + :type expiry_window_seconds: int + :param expiry_window_seconds: The amount of time, in seconds, + """ + self._source_credentials = source_credentials + self._mfa_prompter = mfa_prompter + if self._mfa_prompter is None: + self._mfa_prompter = getpass.getpass + + super().__init__( + client_creator, + role_arn, + extra_args=extra_args, + cache=cache, + expiry_window_seconds=expiry_window_seconds, + ) + + def _get_credentials(self): + """Get credentials by calling assume role.""" + register_feature_ids(self.feature_ids) + kwargs = self._assume_role_kwargs() + client = self._create_client() + response = client.assume_role(**kwargs) + self._add_account_id_to_response(response) + return response + + def _assume_role_kwargs(self): + """Get the arguments for assume role based on current configuration.""" + assume_role_kwargs = deepcopy(self._assume_kwargs) + + mfa_serial = assume_role_kwargs.get('SerialNumber') + + if mfa_serial is not None: + prompt = f'Enter MFA code for {mfa_serial}: ' + token_code = self._mfa_prompter(prompt) + assume_role_kwargs['TokenCode'] = token_code + + duration_seconds = assume_role_kwargs.get('DurationSeconds') + + if duration_seconds is not None: + assume_role_kwargs['DurationSeconds'] = duration_seconds + + return assume_role_kwargs + + def _create_client(self): + """Create an STS client using the source credentials.""" + frozen_credentials = self._source_credentials.get_frozen_credentials() + return self._client_creator( + 'sts', + aws_access_key_id=frozen_credentials.access_key, + aws_secret_access_key=frozen_credentials.secret_key, + aws_session_token=frozen_credentials.token, + ) + + +class AssumeRoleWithWebIdentityCredentialFetcher( + BaseAssumeRoleCredentialFetcher +): + def __init__( + self, + client_creator, + 
web_identity_token_loader, + role_arn, + extra_args=None, + cache=None, + expiry_window_seconds=None, + ): + """ + :type client_creator: callable + :param client_creator: A callable that creates a client taking + arguments like ``Session.create_client``. + + :type web_identity_token_loader: callable + :param web_identity_token_loader: A callable that takes no arguments + and returns a web identity token str. + + :type role_arn: str + :param role_arn: The ARN of the role to be assumed. + + :type extra_args: dict + :param extra_args: Any additional arguments to add to the assume + role request using the format of the botocore operation. + Possible keys include, but may not be limited to, + DurationSeconds, Policy, SerialNumber, ExternalId and + RoleSessionName. + + :type cache: dict + :param cache: An object that supports ``__getitem__``, + ``__setitem__``, and ``__contains__``. An example of this is + the ``JSONFileCache`` class in aws-cli. + + :type expiry_window_seconds: int + :param expiry_window_seconds: The amount of time, in seconds, + """ + self._web_identity_token_loader = web_identity_token_loader + + super().__init__( + client_creator, + role_arn, + extra_args=extra_args, + cache=cache, + expiry_window_seconds=expiry_window_seconds, + ) + + def _get_credentials(self): + """Get credentials by calling assume role.""" + register_feature_ids(self.feature_ids) + kwargs = self._assume_role_kwargs() + # Assume role with web identity does not require credentials other than + # the token, explicitly configure the client to not sign requests. + config = Config(signature_version=UNSIGNED) + client = self._client_creator('sts', config=config) + response = client.assume_role_with_web_identity(**kwargs) + self._add_account_id_to_response(response) + return response + + def _assume_role_kwargs(self): + """Get the arguments for assume role based on current configuration.""" + assume_role_kwargs = deepcopy(self._assume_kwargs) + identity_token = self._web_identity_token_loader() + assume_role_kwargs['WebIdentityToken'] = identity_token + + return assume_role_kwargs + + +class CredentialProvider: + # A short name to identify the provider within botocore. + METHOD = None + + # A name to identify the provider for use in cross-sdk features like + # assume role's `credential_source` configuration option. These names + # are to be treated in a case-insensitive way. NOTE: any providers not + # implemented in botocore MUST prefix their canonical names with + # 'custom' or we DO NOT guarantee that it will work with any features + # that this provides. + CANONICAL_NAME = None + + def __init__(self, session=None): + self.session = session + + def load(self): + """ + Loads the credentials from their source & sets them on the object. + + Subclasses should implement this method (by reading from disk, the + environment, the network or wherever), returning ``True`` if they were + found & loaded. + + If not found, this method should return ``False``, indicating that the + ``CredentialResolver`` should fall back to the next available method. + + The default implementation does nothing, assuming the user has set the + ``access_key/secret_key/token`` themselves. 
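
# A sketch of a third-party provider following the contract described here;
# the class, its METHOD, and the vault lookup are all hypothetical. Per the
# CANONICAL_NAME note above, non-botocore providers should use a 'custom'
# prefix.
from botocore.credentials import CredentialProvider, Credentials

class MyVaultProvider(CredentialProvider):
    METHOD = 'my-vault'
    CANONICAL_NAME = 'CustomMyVault'

    def load(self):
        secret = self._fetch_from_vault()
        if secret is None:
            return None  # let the resolver fall through to the next provider
        return Credentials(
            secret['access_key'], secret['secret_key'], method=self.METHOD
        )

    def _fetch_from_vault(self):
        # Stand-in for a real secret-store call.
        return {'access_key': 'AKIAEXAMPLE', 'secret_key': 'wJalrEXAMPLEKEY'}
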
+ + :returns: Whether credentials were found & set + :rtype: Credentials + """ + return True + + def _extract_creds_from_mapping(self, mapping, *key_names): + found = [] + for key_name in key_names: + try: + found.append(mapping[key_name]) + except KeyError: + raise PartialCredentialsError( + provider=self.METHOD, cred_var=key_name + ) + return found + + +class ProcessProvider(CredentialProvider): + METHOD = 'custom-process' + + def __init__(self, profile_name, load_config, popen=subprocess.Popen): + self._profile_name = profile_name + self._load_config = load_config + self._loaded_config = None + self._popen = popen + + def load(self): + credential_process = self._credential_process + if credential_process is None: + return + + register_feature_id('CREDENTIALS_PROFILE_PROCESS') + creds_dict = self._retrieve_credentials_using(credential_process) + register_feature_id('CREDENTIALS_PROCESS') + if creds_dict.get('expiry_time') is not None: + return RefreshableCredentials.create_from_metadata( + creds_dict, + lambda: self._retrieve_credentials_using(credential_process), + self.METHOD, + ) + + return Credentials( + access_key=creds_dict['access_key'], + secret_key=creds_dict['secret_key'], + token=creds_dict.get('token'), + method=self.METHOD, + account_id=creds_dict.get('account_id'), + ) + + def _retrieve_credentials_using(self, credential_process): + # We're not using shell=True, so we need to pass the + # command and all arguments as a list. + process_list = compat_shell_split(credential_process) + p = self._popen( + process_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise CredentialRetrievalError( + provider=self.METHOD, error_msg=stderr.decode('utf-8') + ) + parsed = botocore.compat.json.loads(stdout.decode('utf-8')) + version = parsed.get('Version', '') + if version != 1: + raise CredentialRetrievalError( + provider=self.METHOD, + error_msg=( + f"Unsupported version '{version}' for credential process " + f"provider, supported versions: 1" + ), + ) + try: + return { + 'access_key': parsed['AccessKeyId'], + 'secret_key': parsed['SecretAccessKey'], + 'token': parsed.get('SessionToken'), + 'expiry_time': parsed.get('Expiration'), + 'account_id': self._get_account_id(parsed), + } + except KeyError as e: + raise CredentialRetrievalError( + provider=self.METHOD, + error_msg=f"Missing required key in response: {e}", + ) + + @property + def _credential_process(self): + return self.profile_config.get('credential_process') + + @property + def profile_config(self): + if self._loaded_config is None: + self._loaded_config = self._load_config() + profile_config = self._loaded_config.get('profiles', {}).get( + self._profile_name, {} + ) + return profile_config + + def _get_account_id(self, parsed): + account_id = parsed.get('AccountId') + return account_id or self.profile_config.get('aws_account_id') + + +class InstanceMetadataProvider(CredentialProvider): + METHOD = 'iam-role' + CANONICAL_NAME = 'Ec2InstanceMetadata' + + def __init__(self, iam_role_fetcher): + self._role_fetcher = iam_role_fetcher + + def load(self): + fetcher = self._role_fetcher + # We do the first request, to see if we get useful data back. + # If not, we'll pass & move on to whatever's next in the credential + # chain. 
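
# A minimal external credential_process conforming to the Version-1 payload
# that ProcessProvider parses above; all values are fake. A profile would
# reference a script like this with: credential_process = /path/to/script
import json

print(json.dumps({
    'Version': 1,
    'AccessKeyId': 'AKIAEXAMPLE',
    'SecretAccessKey': 'wJalrEXAMPLEKEY',
    'SessionToken': 'tokenEXAMPLE',        # optional
    'Expiration': '2030-01-01T00:00:00Z',  # optional; enables auto-refresh
}))
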
+ metadata = fetcher.retrieve_iam_role_credentials() + if not metadata: + return None + register_feature_id('CREDENTIALS_IMDS') + logger.info( + 'Found credentials from IAM Role: %s', metadata['role_name'] + ) + # We manually set the data here, since we already made the request & + # have it. When the expiry is hit, the credentials will auto-refresh + # themselves. + creds = RefreshableCredentials.create_from_metadata( + metadata, + method=self.METHOD, + refresh_using=fetcher.retrieve_iam_role_credentials, + ) + return creds + + +class EnvProvider(CredentialProvider): + METHOD = 'env' + CANONICAL_NAME = 'Environment' + ACCESS_KEY = 'AWS_ACCESS_KEY_ID' + SECRET_KEY = 'AWS_SECRET_ACCESS_KEY' + # The token can come from either of these env var. + # AWS_SESSION_TOKEN is what other AWS SDKs have standardized on. + TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN'] + EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION' + ACCOUNT_ID = 'AWS_ACCOUNT_ID' + + def __init__(self, environ=None, mapping=None): + """ + + :param environ: The environment variables (defaults to + ``os.environ`` if no value is provided). + :param mapping: An optional mapping of variable names to + environment variable names. Use this if you want to + change the mapping of access_key->AWS_ACCESS_KEY_ID, etc. + The dict can have up to 5 keys: + * ``access_key`` + * ``secret_key`` + * ``token`` + * ``expiry_time`` + * ``account_id`` + """ + if environ is None: + environ = os.environ + self.environ = environ + self._mapping = self._build_mapping(mapping) + + def _build_mapping(self, mapping): + # Mapping of variable name to env var name. + var_mapping = {} + if mapping is None: + # Use the class var default. + var_mapping['access_key'] = self.ACCESS_KEY + var_mapping['secret_key'] = self.SECRET_KEY + var_mapping['token'] = self.TOKENS + var_mapping['expiry_time'] = self.EXPIRY_TIME + var_mapping['account_id'] = self.ACCOUNT_ID + else: + var_mapping['access_key'] = mapping.get( + 'access_key', self.ACCESS_KEY + ) + var_mapping['secret_key'] = mapping.get( + 'secret_key', self.SECRET_KEY + ) + var_mapping['token'] = mapping.get('token', self.TOKENS) + if not isinstance(var_mapping['token'], list): + var_mapping['token'] = [var_mapping['token']] + var_mapping['expiry_time'] = mapping.get( + 'expiry_time', self.EXPIRY_TIME + ) + var_mapping['account_id'] = mapping.get( + 'account_id', self.ACCOUNT_ID + ) + return var_mapping + + def load(self): + """ + Search for credentials in explicit environment variables. 
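
# A sketch of the mapping override described above, using a plain dict
# instead of os.environ; the variable names are invented.
from botocore.credentials import EnvProvider

fake_env = {'MY_ACCESS_KEY': 'AKIAEXAMPLE', 'MY_SECRET_KEY': 'wJalrEXAMPLEKEY'}
provider = EnvProvider(
    environ=fake_env,
    mapping={'access_key': 'MY_ACCESS_KEY', 'secret_key': 'MY_SECRET_KEY'},
)
creds = provider.load()
print(creds.access_key)  # 'AKIAEXAMPLE'
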
+ """ + + access_key = self.environ.get(self._mapping['access_key'], '') + + if access_key: + logger.info('Found credentials in environment variables.') + fetcher = self._create_credentials_fetcher() + credentials = fetcher(require_expiry=False) + register_feature_id('CREDENTIALS_ENV_VARS') + + expiry_time = credentials['expiry_time'] + if expiry_time is not None: + expiry_time = parse(expiry_time) + return RefreshableCredentials( + credentials['access_key'], + credentials['secret_key'], + credentials['token'], + expiry_time, + refresh_using=fetcher, + method=self.METHOD, + account_id=credentials['account_id'], + ) + + return Credentials( + credentials['access_key'], + credentials['secret_key'], + credentials['token'], + method=self.METHOD, + account_id=credentials['account_id'], + ) + else: + return None + + def _create_credentials_fetcher(self): + mapping = self._mapping + method = self.METHOD + environ = self.environ + + def fetch_credentials(require_expiry=True): + credentials = {} + + access_key = environ.get(mapping['access_key'], '') + if not access_key: + raise PartialCredentialsError( + provider=method, cred_var=mapping['access_key'] + ) + credentials['access_key'] = access_key + + secret_key = environ.get(mapping['secret_key'], '') + if not secret_key: + raise PartialCredentialsError( + provider=method, cred_var=mapping['secret_key'] + ) + credentials['secret_key'] = secret_key + + credentials['token'] = None + for token_env_var in mapping['token']: + token = environ.get(token_env_var, '') + if token: + credentials['token'] = token + break + + credentials['expiry_time'] = None + expiry_time = environ.get(mapping['expiry_time'], '') + if expiry_time: + credentials['expiry_time'] = expiry_time + if require_expiry and not expiry_time: + raise PartialCredentialsError( + provider=method, cred_var=mapping['expiry_time'] + ) + + credentials['account_id'] = None + account_id = environ.get(mapping['account_id'], '') + if account_id: + credentials['account_id'] = account_id + + return credentials + + return fetch_credentials + + +class OriginalEC2Provider(CredentialProvider): + METHOD = 'ec2-credentials-file' + CANONICAL_NAME = 'Ec2Config' + + CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE' + ACCESS_KEY = 'AWSAccessKeyId' + SECRET_KEY = 'AWSSecretKey' + + def __init__(self, environ=None, parser=None): + if environ is None: + environ = os.environ + if parser is None: + parser = parse_key_val_file + self._environ = environ + self._parser = parser + + def load(self): + """ + Search for a credential file used by original EC2 CLI tools. + """ + if 'AWS_CREDENTIAL_FILE' in self._environ: + full_path = os.path.expanduser( + self._environ['AWS_CREDENTIAL_FILE'] + ) + creds = self._parser(full_path) + if self.ACCESS_KEY in creds: + logger.info('Found credentials in AWS_CREDENTIAL_FILE.') + access_key = creds[self.ACCESS_KEY] + secret_key = creds[self.SECRET_KEY] + # EC2 creds file doesn't support session tokens. + return Credentials(access_key, secret_key, method=self.METHOD) + else: + return None + + +class SharedCredentialProvider(CredentialProvider): + METHOD = 'shared-credentials-file' + CANONICAL_NAME = 'SharedCredentials' + + ACCESS_KEY = 'aws_access_key_id' + SECRET_KEY = 'aws_secret_access_key' + # Same deal as the EnvProvider above. Botocore originally supported + # aws_security_token, but the SDKs are standardizing on aws_session_token + # so we support both. 
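+    # When both keys are present in a profile, _get_session_token below
+    # returns the first match in this list, i.e. aws_security_token wins.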
+    TOKENS = ['aws_security_token', 'aws_session_token']
+    ACCOUNT_ID = 'aws_account_id'
+
+    def __init__(self, creds_filename, profile_name=None, ini_parser=None):
+        self._creds_filename = creds_filename
+        if profile_name is None:
+            profile_name = 'default'
+        self._profile_name = profile_name
+        if ini_parser is None:
+            ini_parser = botocore.configloader.raw_config_parse
+        self._ini_parser = ini_parser
+
+    def load(self):
+        try:
+            available_creds = self._ini_parser(self._creds_filename)
+        except ConfigNotFound:
+            return None
+        if self._profile_name in available_creds:
+            config = available_creds[self._profile_name]
+            if self.ACCESS_KEY in config:
+                logger.info(
+                    "Found credentials in shared credentials file: %s",
+                    self._creds_filename,
+                )
+                access_key, secret_key = self._extract_creds_from_mapping(
+                    config, self.ACCESS_KEY, self.SECRET_KEY
+                )
+                token = self._get_session_token(config)
+                account_id = self._get_account_id(config)
+                register_feature_id('CREDENTIALS_PROFILE')
+                return Credentials(
+                    access_key,
+                    secret_key,
+                    token,
+                    method=self.METHOD,
+                    account_id=account_id,
+                )
+
+    def _get_session_token(self, config):
+        for token_envvar in self.TOKENS:
+            if token_envvar in config:
+                return config[token_envvar]
+
+    def _get_account_id(self, config):
+        return config.get(self.ACCOUNT_ID)
+
+
+class ConfigProvider(CredentialProvider):
+    """INI based config provider with profile sections."""
+
+    METHOD = 'config-file'
+    CANONICAL_NAME = 'SharedConfig'
+
+    ACCESS_KEY = 'aws_access_key_id'
+    SECRET_KEY = 'aws_secret_access_key'
+    # Same deal as the EnvProvider above. Botocore originally supported
+    # aws_security_token, but the SDKs are standardizing on aws_session_token
+    # so we support both.
+    TOKENS = ['aws_security_token', 'aws_session_token']
+    ACCOUNT_ID = 'aws_account_id'
+
+    def __init__(self, config_filename, profile_name, config_parser=None):
+        """
+
+        :param config_filename: The full path of the configuration file to
+            load for the session.
+        :param profile_name: The name of the current profile.
+        :param config_parser: A config parser callable.
+
+        """
+        self._config_filename = config_filename
+        self._profile_name = profile_name
+        if config_parser is None:
+            config_parser = botocore.configloader.load_config
+        self._config_parser = config_parser
+
+    def load(self):
+        """
+        If there are credentials in the configuration associated with
+        the session, use those.
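+
+        A hypothetical profile section this provider can read::
+
+            [profile dev]
+            aws_access_key_id = AKID
+            aws_secret_access_key = SECRET
+            aws_session_token = TOKEN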
+        """
+        try:
+            full_config = self._config_parser(self._config_filename)
+        except ConfigNotFound:
+            return None
+        if self._profile_name in full_config['profiles']:
+            profile_config = full_config['profiles'][self._profile_name]
+            if self.ACCESS_KEY in profile_config:
+                logger.info(
+                    "Credentials found in config file: %s",
+                    self._config_filename,
+                )
+                access_key, secret_key = self._extract_creds_from_mapping(
+                    profile_config, self.ACCESS_KEY, self.SECRET_KEY
+                )
+                token = self._get_session_token(profile_config)
+                account_id = self._get_account_id(profile_config)
+                register_feature_id('CREDENTIALS_PROFILE')
+                return Credentials(
+                    access_key,
+                    secret_key,
+                    token,
+                    method=self.METHOD,
+                    account_id=account_id,
+                )
+        else:
+            return None
+
+    def _get_session_token(self, profile_config):
+        for token_name in self.TOKENS:
+            if token_name in profile_config:
+                return profile_config[token_name]
+
+    def _get_account_id(self, config):
+        return config.get(self.ACCOUNT_ID)
+
+
+class BotoProvider(CredentialProvider):
+    METHOD = 'boto-config'
+    CANONICAL_NAME = 'Boto2Config'
+
+    BOTO_CONFIG_ENV = 'BOTO_CONFIG'
+    DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
+    ACCESS_KEY = 'aws_access_key_id'
+    SECRET_KEY = 'aws_secret_access_key'
+
+    def __init__(self, environ=None, ini_parser=None):
+        if environ is None:
+            environ = os.environ
+        if ini_parser is None:
+            ini_parser = botocore.configloader.raw_config_parse
+        self._environ = environ
+        self._ini_parser = ini_parser
+
+    def load(self):
+        """
+        Look for credentials in the boto config file.
+        """
+        if self.BOTO_CONFIG_ENV in self._environ:
+            potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
+        else:
+            potential_locations = self.DEFAULT_CONFIG_FILENAMES
+        for filename in potential_locations:
+            try:
+                config = self._ini_parser(filename)
+            except ConfigNotFound:
+                # Move on to the next potential config file name.
+                continue
+            if 'Credentials' in config:
+                credentials = config['Credentials']
+                if self.ACCESS_KEY in credentials:
+                    logger.info(
+                        "Found credentials in boto config file: %s", filename
+                    )
+                    access_key, secret_key = self._extract_creds_from_mapping(
+                        credentials, self.ACCESS_KEY, self.SECRET_KEY
+                    )
+                    register_feature_id('CREDENTIALS_BOTO2_CONFIG_FILE')
+                    return Credentials(
+                        access_key, secret_key, method=self.METHOD
+                    )
+
+
+class AssumeRoleProvider(CredentialProvider):
+    METHOD = 'assume-role'
+    # The AssumeRole provider is logically part of the SharedConfig and
+    # SharedCredentials providers. Since the purpose of the canonical name
+    # is to provide cross-sdk compatibility, calling code will need to be
+    # aware that either of those providers should be tied to the AssumeRole
+    # provider as much as possible.
+    CANONICAL_NAME = None
+    ROLE_CONFIG_VAR = 'role_arn'
+    WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
+    # Credentials are considered expired (and will be refreshed) once the
+    # total remaining time until the credentials expire is less than
+    # EXPIRY_WINDOW_SECONDS.
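+    # (60 * 15 seconds = 15 minutes.)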
+    EXPIRY_WINDOW_SECONDS = 60 * 15
+    NAMED_PROVIDER_FEATURE_MAP = {
+        'Ec2InstanceMetadata': 'CREDENTIALS_IMDS',
+        'Environment': 'CREDENTIALS_ENV_VARS',
+        'EcsContainer': 'CREDENTIALS_HTTP',
+    }
+
+    def __init__(
+        self,
+        load_config,
+        client_creator,
+        cache,
+        profile_name,
+        prompter=getpass.getpass,
+        credential_sourcer=None,
+        profile_provider_builder=None,
+    ):
+        """
+        :type load_config: callable
+        :param load_config: A function that accepts no arguments, and
+            when called, will return the full configuration dictionary
+            for the session (``session.full_config``).
+
+        :type client_creator: callable
+        :param client_creator: A factory function that will create
+            a client when called. Has the same interface as
+            ``botocore.session.Session.create_client``.
+
+        :type cache: dict
+        :param cache: An object that supports ``__getitem__``,
+            ``__setitem__``, and ``__contains__``. An example
+            of this is the ``JSONFileCache`` class in the CLI.
+
+        :type profile_name: str
+        :param profile_name: The name of the profile.
+
+        :type prompter: callable
+        :param prompter: A callable that returns input provided
+            by the user (e.g. ``input``, ``getpass.getpass``, etc.).
+
+        :type credential_sourcer: CanonicalNameCredentialSourcer
+        :param credential_sourcer: A credential provider that takes a
+            configuration, which is used to provide the source credentials
+            for the STS call.
+        """
+        #: The cache used to first check for assumed credentials.
+        #: This is checked before making the AssumeRole API
+        #: calls and can be useful if you have short lived
+        #: scripts and you'd like to avoid calling AssumeRole
+        #: until the credentials are expired.
+        self.cache = cache
+        self._load_config = load_config
+        # client_creator is a callable that creates a client;
+        # it's basically session.create_client.
+        self._client_creator = client_creator
+        self._profile_name = profile_name
+        self._prompter = prompter
+        # The _loaded_config attribute will be populated from the
+        # load_config() function once the configuration is actually
+        # loaded. The reason we go through all this instead of just
+        # requiring that the loaded_config be passed to us is so that
+        # we can defer configuration loading until we actually try
+        # to load credentials (as opposed to when the object is
+        # instantiated).
+        self._loaded_config = {}
+        self._credential_sourcer = credential_sourcer
+        self._profile_provider_builder = profile_provider_builder
+        self._visited_profiles = [self._profile_name]
+        self._feature_ids = set()
+
+    def load(self):
+        self._loaded_config = self._load_config()
+        profiles = self._loaded_config.get('profiles', {})
+        profile = profiles.get(self._profile_name, {})
+        if self._has_assume_role_config_vars(profile):
+            return self._load_creds_via_assume_role(self._profile_name)
+
+    def _has_assume_role_config_vars(self, profile):
+        return (
+            self.ROLE_CONFIG_VAR in profile
+            and
+            # We need to ensure this provider doesn't look at a profile when
+            # the profile has configuration for web identity. Simply relying on
+            # the order in the credential chain is insufficient as it doesn't
+            # prevent the case when we're doing an assume role chain.
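+            # (Such profiles are handled by the
+            # AssumeRoleWithWebIdentityProvider defined below instead.)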
+ self.WEB_IDENTITY_TOKE_FILE_VAR not in profile + ) + + def _load_creds_via_assume_role(self, profile_name): + role_config = self._get_role_config(profile_name) + source_credentials = self._resolve_source_credentials( + role_config, profile_name + ) + + extra_args = {} + role_session_name = role_config.get('role_session_name') + if role_session_name is not None: + extra_args['RoleSessionName'] = role_session_name + + external_id = role_config.get('external_id') + if external_id is not None: + extra_args['ExternalId'] = external_id + + mfa_serial = role_config.get('mfa_serial') + if mfa_serial is not None: + extra_args['SerialNumber'] = mfa_serial + + duration_seconds = role_config.get('duration_seconds') + if duration_seconds is not None: + extra_args['DurationSeconds'] = duration_seconds + + fetcher = AssumeRoleCredentialFetcher( + client_creator=self._client_creator, + source_credentials=source_credentials, + role_arn=role_config['role_arn'], + extra_args=extra_args, + mfa_prompter=self._prompter, + cache=self.cache, + ) + fetcher.feature_ids = self._feature_ids.copy() + refresher = fetcher.fetch_credentials + if mfa_serial is not None: + refresher = create_mfa_serial_refresher(refresher) + + self._feature_ids.add('CREDENTIALS_STS_ASSUME_ROLE') + register_feature_ids(self._feature_ids) + # The initial credentials are empty and the expiration time is set + # to now so that we can delay the call to assume role until it is + # strictly needed. + return DeferredRefreshableCredentials( + method=self.METHOD, + refresh_using=refresher, + time_fetcher=_local_now, + ) + + def _get_role_config(self, profile_name): + """Retrieves and validates the role configuration for the profile.""" + profiles = self._loaded_config.get('profiles', {}) + + profile = profiles[profile_name] + source_profile = profile.get('source_profile') + role_arn = profile['role_arn'] + credential_source = profile.get('credential_source') + mfa_serial = profile.get('mfa_serial') + external_id = profile.get('external_id') + role_session_name = profile.get('role_session_name') + duration_seconds = profile.get('duration_seconds') + + role_config = { + 'role_arn': role_arn, + 'external_id': external_id, + 'mfa_serial': mfa_serial, + 'role_session_name': role_session_name, + 'source_profile': source_profile, + 'credential_source': credential_source, + } + + if duration_seconds is not None: + try: + role_config['duration_seconds'] = int(duration_seconds) + except ValueError: + pass + + # Either the credential source or the source profile must be + # specified, but not both. + if credential_source is not None and source_profile is not None: + raise InvalidConfigError( + error_msg=( + f'The profile "{profile_name}" contains both ' + 'source_profile and credential_source.' + ) + ) + elif credential_source is None and source_profile is None: + raise PartialCredentialsError( + provider=self.METHOD, + cred_var='source_profile or credential_source', + ) + elif credential_source is not None: + self._validate_credential_source(profile_name, credential_source) + else: + self._validate_source_profile(profile_name, source_profile) + + return role_config + + def _validate_credential_source(self, parent_profile, credential_source): + if self._credential_sourcer is None: + raise InvalidConfigError( + error_msg=( + f"The credential_source \"{credential_source}\" is specified " + f"in profile \"{parent_profile}\", " + f"but no source provider was configured." 
+ ) + ) + if not self._credential_sourcer.is_supported(credential_source): + raise InvalidConfigError( + error_msg=( + f"The credential source \"{credential_source}\" referenced " + f"in profile \"{parent_profile}\" is not valid." + ) + ) + + def _source_profile_has_credentials(self, profile): + return any( + [ + self._has_static_credentials(profile), + self._has_assume_role_config_vars(profile), + ] + ) + + def _validate_source_profile( + self, parent_profile_name, source_profile_name + ): + profiles = self._loaded_config.get('profiles', {}) + if source_profile_name not in profiles: + raise InvalidConfigError( + error_msg=( + f"The source_profile \"{source_profile_name}\" referenced in " + f"the profile \"{parent_profile_name}\" does not exist." + ) + ) + + source_profile = profiles[source_profile_name] + + # Make sure we aren't going into an infinite loop. If we haven't + # visited the profile yet, we're good. + if source_profile_name not in self._visited_profiles: + return + + # If we have visited the profile and the profile isn't simply + # referencing itself, that's an infinite loop. + if source_profile_name != parent_profile_name: + raise InfiniteLoopConfigError( + source_profile=source_profile_name, + visited_profiles=self._visited_profiles, + ) + + # A profile is allowed to reference itself so that it can source + # static credentials and have configuration all in the same + # profile. This will only ever work for the top level assume + # role because the static credentials will otherwise take + # precedence. + if not self._has_static_credentials(source_profile): + raise InfiniteLoopConfigError( + source_profile=source_profile_name, + visited_profiles=self._visited_profiles, + ) + + def _has_static_credentials(self, profile): + static_keys = ['aws_secret_access_key', 'aws_access_key_id'] + return any(static_key in profile for static_key in static_keys) + + def _resolve_source_credentials(self, role_config, profile_name): + credential_source = role_config.get('credential_source') + if credential_source is not None: + self._feature_ids.add('CREDENTIALS_PROFILE_NAMED_PROVIDER') + return self._resolve_credentials_from_source( + credential_source, profile_name + ) + + source_profile = role_config['source_profile'] + self._visited_profiles.append(source_profile) + self._feature_ids.add('CREDENTIALS_PROFILE_SOURCE_PROFILE') + return self._resolve_credentials_from_profile(source_profile) + + def _resolve_credentials_from_profile(self, profile_name): + profiles = self._loaded_config.get('profiles', {}) + profile = profiles[profile_name] + self._feature_ids.add('CREDENTIALS_PROFILE') + if ( + self._has_static_credentials(profile) + and not self._profile_provider_builder + ): + # This is only here for backwards compatibility. If this provider + # isn't given a profile provider builder we still want to be able + # to handle the basic static credential case as we would before the + # profile provider builder parameter was added. + return self._resolve_static_credentials_from_profile(profile) + elif self._has_static_credentials( + profile + ) or not self._has_assume_role_config_vars(profile): + profile_providers = self._profile_provider_builder.providers( + profile_name=profile_name, + disable_env_vars=True, + ) + profile_chain = CredentialResolver(profile_providers) + credentials = profile_chain.load_credentials() + if credentials is None: + error_message = ( + 'The source profile "%s" must have credentials.' 
+                )
+                raise InvalidConfigError(
+                    error_msg=error_message % profile_name,
+                )
+            return credentials
+
+        return self._load_creds_via_assume_role(profile_name)
+
+    def _resolve_static_credentials_from_profile(self, profile):
+        try:
+            return Credentials(
+                access_key=profile['aws_access_key_id'],
+                secret_key=profile['aws_secret_access_key'],
+                token=profile.get('aws_session_token'),
+            )
+        except KeyError as e:
+            raise PartialCredentialsError(
+                provider=self.METHOD, cred_var=str(e)
+            )
+
+    def _resolve_credentials_from_source(
+        self, credential_source, profile_name
+    ):
+        credentials = self._credential_sourcer.source_credentials(
+            credential_source
+        )
+        if credentials is None:
+            raise CredentialRetrievalError(
+                provider=credential_source,
+                error_msg=(
+                    'No credentials found in credential_source referenced '
+                    f'in profile {profile_name}'
+                ),
+            )
+        named_provider_feature_id = self.NAMED_PROVIDER_FEATURE_MAP.get(
+            credential_source
+        )
+        if named_provider_feature_id:
+            self._feature_ids.add(named_provider_feature_id)
+        return credentials
+
+
+class AssumeRoleWithWebIdentityProvider(CredentialProvider):
+    METHOD = 'assume-role-with-web-identity'
+    CANONICAL_NAME = None
+    _CONFIG_TO_ENV_VAR = {
+        'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
+        'role_session_name': 'AWS_ROLE_SESSION_NAME',
+        'role_arn': 'AWS_ROLE_ARN',
+    }
+
+    def __init__(
+        self,
+        load_config,
+        client_creator,
+        profile_name,
+        cache=None,
+        disable_env_vars=False,
+        token_loader_cls=None,
+    ):
+        self.cache = cache
+        self._load_config = load_config
+        self._client_creator = client_creator
+        self._profile_name = profile_name
+        self._profile_config = None
+        self._disable_env_vars = disable_env_vars
+        if token_loader_cls is None:
+            token_loader_cls = FileWebIdentityTokenLoader
+        self._token_loader_cls = token_loader_cls
+        self._feature_ids = set()
+
+    def load(self):
+        return self._assume_role_with_web_identity()
+
+    def _get_profile_config(self, key):
+        if self._profile_config is None:
+            loaded_config = self._load_config()
+            profiles = loaded_config.get('profiles', {})
+            self._profile_config = profiles.get(self._profile_name, {})
+        return self._profile_config.get(key)
+
+    def _get_env_config(self, key):
+        if self._disable_env_vars:
+            return None
+        env_key = self._CONFIG_TO_ENV_VAR.get(key)
+        if env_key and env_key in os.environ:
+            return os.environ[env_key]
+        return None
+
+    def _get_config(self, key):
+        env_value = self._get_env_config(key)
+        if env_value is not None:
+            self._feature_ids.add('CREDENTIALS_ENV_VARS_STS_WEB_ID_TOKEN')
+            return env_value
+
+        config_value = self._get_profile_config(key)
+        if config_value is not None:
+            self._feature_ids.add('CREDENTIALS_PROFILE_STS_WEB_ID_TOKEN')
+            return config_value
+
+        return None
+
+    def _assume_role_with_web_identity(self):
+        token_path = self._get_config('web_identity_token_file')
+        if not token_path:
+            return None
+        token_loader = self._token_loader_cls(token_path)
+
+        role_arn = self._get_config('role_arn')
+        if not role_arn:
+            error_msg = (
+                'The provided profile or the current environment is '
+                'configured to assume role with web identity but has no '
+                'role ARN configured. Ensure that the profile has the '
+                'role_arn configuration set or the AWS_ROLE_ARN env var '
+                'is set.'
+ ) + raise InvalidConfigError(error_msg=error_msg) + + extra_args = {} + role_session_name = self._get_config('role_session_name') + if role_session_name is not None: + extra_args['RoleSessionName'] = role_session_name + + fetcher = AssumeRoleWithWebIdentityCredentialFetcher( + client_creator=self._client_creator, + web_identity_token_loader=token_loader, + role_arn=role_arn, + extra_args=extra_args, + cache=self.cache, + ) + fetcher.feature_ids = self._feature_ids.copy() + + self._feature_ids.add('CREDENTIALS_STS_ASSUME_ROLE_WEB_ID') + register_feature_ids(self._feature_ids) + # The initial credentials are empty and the expiration time is set + # to now so that we can delay the call to assume role until it is + # strictly needed. + return DeferredRefreshableCredentials( + method=self.METHOD, + refresh_using=fetcher.fetch_credentials, + ) + + +class CanonicalNameCredentialSourcer: + def __init__(self, providers): + self._providers = providers + + def is_supported(self, source_name): + """Validates a given source name. + + :type source_name: str + :param source_name: The value of credential_source in the config + file. This is the canonical name of the credential provider. + + :rtype: bool + :returns: True if the credential provider is supported, + False otherwise. + """ + return source_name in [p.CANONICAL_NAME for p in self._providers] + + def source_credentials(self, source_name): + """Loads source credentials based on the provided configuration. + + :type source_name: str + :param source_name: The value of credential_source in the config + file. This is the canonical name of the credential provider. + + :rtype: Credentials + """ + source = self._get_provider(source_name) + if isinstance(source, CredentialResolver): + return source.load_credentials() + return source.load() + + def _get_provider(self, canonical_name): + """Return a credential provider by its canonical name. + + :type canonical_name: str + :param canonical_name: The canonical name of the provider. + + :raises UnknownCredentialError: Raised if no + credential provider by the provided name + is found. + """ + provider = self._get_provider_by_canonical_name(canonical_name) + + # The AssumeRole provider should really be part of the SharedConfig + # provider rather than being its own thing, but it is not. It is + # effectively part of both the SharedConfig provider and the + # SharedCredentials provider now due to the way it behaves. + # Therefore if we want either of those providers we should return + # the AssumeRole provider with it. + if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']: + assume_role_provider = self._get_provider_by_method('assume-role') + if assume_role_provider is not None: + # The SharedConfig or SharedCredentials provider may not be + # present if it was removed for some reason, but the + # AssumeRole provider could still be present. In that case, + # return the assume role provider by itself. + if provider is None: + return assume_role_provider + + # If both are present, return them both as a + # CredentialResolver so that calling code can treat them as + # a single entity. + return CredentialResolver([assume_role_provider, provider]) + + if provider is None: + raise UnknownCredentialError(name=canonical_name) + + return provider + + def _get_provider_by_canonical_name(self, canonical_name): + """Return a credential provider by its canonical name. + + This function is strict, it does not attempt to address + compatibility issues. 
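+
+        For example, looking up ``'ec2instancemetadata'`` (any casing)
+        matches a provider whose ``CANONICAL_NAME`` is
+        ``'Ec2InstanceMetadata'``; names are compared case-insensitively.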
+ """ + for provider in self._providers: + name = provider.CANONICAL_NAME + # Canonical names are case-insensitive + if name and name.lower() == canonical_name.lower(): + return provider + + def _get_provider_by_method(self, method): + """Return a credential provider by its METHOD name.""" + for provider in self._providers: + if provider.METHOD == method: + return provider + + +class ContainerProvider(CredentialProvider): + METHOD = 'container-role' + CANONICAL_NAME = 'EcsContainer' + ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' + ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI' + ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN' + ENV_VAR_AUTH_TOKEN_FILE = 'AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE' + + def __init__(self, environ=None, fetcher=None): + if environ is None: + environ = os.environ + if fetcher is None: + fetcher = ContainerMetadataFetcher() + self._environ = environ + self._fetcher = fetcher + + def load(self): + # This cred provider is only triggered if the self.ENV_VAR is set, + # which only happens if you opt into this feature. + if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ: + return self._retrieve_or_fail() + + def _retrieve_or_fail(self): + if self._provided_relative_uri(): + full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR]) + else: + full_uri = self._environ[self.ENV_VAR_FULL] + fetcher = self._create_fetcher(full_uri) + creds = fetcher() + return RefreshableCredentials( + access_key=creds['access_key'], + secret_key=creds['secret_key'], + token=creds['token'], + method=self.METHOD, + expiry_time=_parse_if_needed(creds['expiry_time']), + refresh_using=fetcher, + account_id=creds.get('account_id'), + ) + + def _build_headers(self): + auth_token = None + if self.ENV_VAR_AUTH_TOKEN_FILE in self._environ: + auth_token_file_path = self._environ[self.ENV_VAR_AUTH_TOKEN_FILE] + with open(auth_token_file_path) as token_file: + auth_token = token_file.read() + elif self.ENV_VAR_AUTH_TOKEN in self._environ: + auth_token = self._environ[self.ENV_VAR_AUTH_TOKEN] + if auth_token is not None: + self._validate_auth_token(auth_token) + return {'Authorization': auth_token} + + def _validate_auth_token(self, auth_token): + if "\r" in auth_token or "\n" in auth_token: + raise ValueError("Auth token value is not a legal header value") + + def _create_fetcher(self, full_uri, *args, **kwargs): + def fetch_creds(): + try: + headers = self._build_headers() + response = self._fetcher.retrieve_full_uri( + full_uri, headers=headers + ) + register_feature_id('CREDENTIALS_HTTP') + except MetadataRetrievalError as e: + logger.debug( + "Error retrieving container metadata: %s", e, exc_info=True + ) + raise CredentialRetrievalError( + provider=self.METHOD, error_msg=str(e) + ) + return { + 'access_key': response['AccessKeyId'], + 'secret_key': response['SecretAccessKey'], + 'token': response['Token'], + 'expiry_time': response['Expiration'], + 'account_id': response.get('AccountId'), + } + + return fetch_creds + + def _provided_relative_uri(self): + return self.ENV_VAR in self._environ + + +class CredentialResolver: + def __init__(self, providers): + """ + + :param providers: A list of ``CredentialProvider`` instances. + + """ + self.providers = providers + + def insert_before(self, name, credential_provider): + """ + Inserts a new instance of ``CredentialProvider`` into the chain that + will be tried before an existing one. + + :param name: The short name of the credentials you'd like to insert the + new credentials before. (ex. 
``env`` or ``config``). Existing names
+            & ordering can be discovered via ``self.available_methods``.
+        :type name: string
+
+        :param credential_provider: The new ``CredentialProvider`` instance
+            you'd like to add to the chain.
+        :type credential_provider: A subclass of ``CredentialProvider``
+        """
+        try:
+            offset = [p.METHOD for p in self.providers].index(name)
+        except ValueError:
+            raise UnknownCredentialError(name=name)
+        self.providers.insert(offset, credential_provider)
+
+    def insert_after(self, name, credential_provider):
+        """
+        Inserts a new ``CredentialProvider`` instance into the chain that will
+        be tried after an existing one.
+
+        :param name: The short name of the credentials you'd like to insert the
+            new credentials after. (ex. ``env`` or ``config``). Existing names
+            & ordering can be discovered via ``self.available_methods``.
+        :type name: string
+
+        :param credential_provider: The new ``CredentialProvider`` instance
+            you'd like to add to the chain.
+        :type credential_provider: A subclass of ``CredentialProvider``
+        """
+        offset = self._get_provider_offset(name)
+        self.providers.insert(offset + 1, credential_provider)
+
+    def remove(self, name):
+        """
+        Removes a given ``CredentialProvider`` instance from the chain.
+
+        :param name: The short name of the credentials instance to remove.
+        :type name: string
+        """
+        available_methods = [p.METHOD for p in self.providers]
+        if name not in available_methods:
+            # It's not present. Fail silently.
+            return
+
+        offset = available_methods.index(name)
+        self.providers.pop(offset)
+
+    def get_provider(self, name):
+        """Return a credential provider by name.
+
+        :type name: str
+        :param name: The name of the provider.
+
+        :raises UnknownCredentialError: Raised if no
+            credential provider by the provided name
+            is found.
+        """
+        return self.providers[self._get_provider_offset(name)]
+
+    def _get_provider_offset(self, name):
+        try:
+            return [p.METHOD for p in self.providers].index(name)
+        except ValueError:
+            raise UnknownCredentialError(name=name)
+
+    def load_credentials(self):
+        """
+        Goes through the credentials chain, returning the first ``Credentials``
+        that could be loaded.
+        """
+        # First provider to return a non-None response wins.
+        for provider in self.providers:
+            logger.debug("Looking for credentials via: %s", provider.METHOD)
+            creds = provider.load()
+            if creds is not None:
+                return creds
+
+        # If we got here, no credentials could be found.
+        # This feels like it should be an exception, but historically, ``None``
+        # is returned.
+        #
+        # +1
+        # -js
+        return None
+
+
+class SSOCredentialFetcher(CachedCredentialFetcher):
+    _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+
+    def __init__(
+        self,
+        start_url,
+        sso_region,
+        role_name,
+        account_id,
+        client_creator,
+        token_loader=None,
+        cache=None,
+        expiry_window_seconds=None,
+        token_provider=None,
+        sso_session_name=None,
+        time_fetcher=_local_now,
+    ):
+        self._client_creator = client_creator
+        self._sso_region = sso_region
+        self._role_name = role_name
+        self._account_id = account_id
+        self._start_url = start_url
+        self._token_loader = token_loader
+        self._token_provider = token_provider
+        self._sso_session_name = sso_session_name
+        self._time_fetcher = time_fetcher
+        super().__init__(cache, expiry_window_seconds)
+
+    def _create_cache_key(self):
+        """Create a predictable cache key for the current configuration.
+
+        The cache key is intended to be compatible with file names.
+ """ + args = { + 'roleName': self._role_name, + 'accountId': self._account_id, + } + if self._sso_session_name: + args['sessionName'] = self._sso_session_name + else: + args['startUrl'] = self._start_url + # NOTE: It would be good to hoist this cache key construction logic + # into the CachedCredentialFetcher class as we should be consistent. + # Unfortunately, the current assume role fetchers that sub class don't + # pass separators resulting in non-minified JSON. In the long term, + # all fetchers should use the below caching scheme. + args = json.dumps(args, sort_keys=True, separators=(',', ':')) + argument_hash = sha1(args.encode('utf-8')).hexdigest() + return self._make_file_safe(argument_hash) + + def _parse_timestamp(self, timestamp_ms): + # fromtimestamp expects seconds so: milliseconds / 1000 = seconds + timestamp_seconds = timestamp_ms / 1000.0 + timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc()) + return timestamp.strftime(self._UTC_DATE_FORMAT) + + def _get_credentials(self): + """Get credentials by calling SSO get role credentials.""" + config = Config( + signature_version=UNSIGNED, + region_name=self._sso_region, + ) + client = self._client_creator('sso', config=config) + if self._token_provider: + initial_token_data = self._token_provider.load_token() + token = initial_token_data.get_frozen_token().token + else: + token_dict = self._token_loader(self._start_url) + token = token_dict['accessToken'] + + # raise an UnauthorizedSSOTokenError if the loaded legacy token + # is expired to save a call to GetRoleCredentials with an + # expired token. + expiration = dateutil.parser.parse(token_dict['expiresAt']) + remaining = total_seconds(expiration - self._time_fetcher()) + if remaining <= 0: + raise UnauthorizedSSOTokenError() + + kwargs = { + 'roleName': self._role_name, + 'accountId': self._account_id, + 'accessToken': token, + } + try: + register_feature_ids(self.feature_ids) + response = client.get_role_credentials(**kwargs) + except client.exceptions.UnauthorizedException: + raise UnauthorizedSSOTokenError() + credentials = response['roleCredentials'] + + credentials = { + 'ProviderType': 'sso', + 'Credentials': { + 'AccessKeyId': credentials['accessKeyId'], + 'SecretAccessKey': credentials['secretAccessKey'], + 'SessionToken': credentials['sessionToken'], + 'Expiration': self._parse_timestamp(credentials['expiration']), + 'AccountId': self._account_id, + }, + } + return credentials + + +class SSOProvider(CredentialProvider): + METHOD = 'sso' + + _SSO_TOKEN_CACHE_DIR = os.path.expanduser( + os.path.join('~', '.aws', 'sso', 'cache') + ) + _PROFILE_REQUIRED_CONFIG_VARS = ( + 'sso_role_name', + 'sso_account_id', + ) + _SSO_REQUIRED_CONFIG_VARS = ( + 'sso_start_url', + 'sso_region', + ) + _ALL_REQUIRED_CONFIG_VARS = ( + _PROFILE_REQUIRED_CONFIG_VARS + _SSO_REQUIRED_CONFIG_VARS + ) + + def __init__( + self, + load_config, + client_creator, + profile_name, + cache=None, + token_cache=None, + token_provider=None, + ): + if token_cache is None: + token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR) + self._token_cache = token_cache + self._token_provider = token_provider + if cache is None: + cache = {} + self.cache = cache + self._load_config = load_config + self._client_creator = client_creator + self._profile_name = profile_name + self._feature_ids = set() + + def _load_sso_config(self): + loaded_config = self._load_config() + profiles = loaded_config.get('profiles', {}) + profile_name = self._profile_name + profile_config = profiles.get(self._profile_name, 
{}) + sso_sessions = loaded_config.get('sso_sessions', {}) + + # Role name & Account ID indicate the cred provider should be used + if all( + c not in profile_config for c in self._PROFILE_REQUIRED_CONFIG_VARS + ): + return None + + resolved_config, extra_reqs = self._resolve_sso_session_reference( + profile_config, sso_sessions + ) + + config = {} + missing_config_vars = [] + all_required_configs = self._ALL_REQUIRED_CONFIG_VARS + extra_reqs + for config_var in all_required_configs: + if config_var in resolved_config: + config[config_var] = resolved_config[config_var] + else: + missing_config_vars.append(config_var) + + if missing_config_vars: + missing = ', '.join(missing_config_vars) + raise InvalidConfigError( + error_msg=( + f'The profile "{profile_name}" is configured to use SSO ' + f'but is missing required configuration: {missing}' + ) + ) + return config + + def _resolve_sso_session_reference(self, profile_config, sso_sessions): + sso_session_name = profile_config.get('sso_session') + if sso_session_name is None: + # No reference to resolve, proceed with legacy flow + return profile_config, () + + if sso_session_name not in sso_sessions: + error_msg = f'The specified sso-session does not exist: "{sso_session_name}"' + raise InvalidConfigError(error_msg=error_msg) + + config = profile_config.copy() + session = sso_sessions[sso_session_name] + for config_var, val in session.items(): + # Validate any keys referenced in both profile and sso_session match + if config.get(config_var, val) != val: + error_msg = ( + f"The value for {config_var} is inconsistent between " + f"profile ({config[config_var]}) and sso-session ({val})." + ) + raise InvalidConfigError(error_msg=error_msg) + config[config_var] = val + return config, ('sso_session',) + + def load(self): + sso_config = self._load_sso_config() + if not sso_config: + return None + + fetcher_kwargs = { + 'start_url': sso_config['sso_start_url'], + 'sso_region': sso_config['sso_region'], + 'role_name': sso_config['sso_role_name'], + 'account_id': sso_config['sso_account_id'], + 'client_creator': self._client_creator, + 'token_loader': SSOTokenLoader(cache=self._token_cache), + 'cache': self.cache, + } + sso_session_in_config = 'sso_session' in sso_config + if sso_session_in_config: + fetcher_kwargs['sso_session_name'] = sso_config['sso_session'] + fetcher_kwargs['token_provider'] = self._token_provider + self._feature_ids.add('CREDENTIALS_PROFILE_SSO') + else: + self._feature_ids.add('CREDENTIALS_PROFILE_SSO_LEGACY') + + sso_fetcher = SSOCredentialFetcher(**fetcher_kwargs) + sso_fetcher.feature_ids = self._feature_ids.copy() + + if sso_session_in_config: + self._feature_ids.add('CREDENTIALS_SSO') + else: + self._feature_ids.add('CREDENTIALS_SSO_LEGACY') + + register_feature_ids(self._feature_ids) + return DeferredRefreshableCredentials( + method=self.METHOD, + refresh_using=sso_fetcher.fetch_credentials, + ) + + +def _base64_url_encode_no_padding(data): + return base64.urlsafe_b64encode(data).rstrip(b'=').decode('ascii') + + +def _build_dpop_header(private_key, uri, uid=None, ts=None): + if EC is None: + raise MissingDependencyException( + msg=( + "This operation requires an additional dependency. You" + " will need to pip install \"botocore[crt]\" before proceeding." 
+            )
+        )
+    x, y = private_key.get_public_coords()
+    jwk = {
+        "kty": "EC",
+        "x": _base64_url_encode_no_padding(x),
+        "y": _base64_url_encode_no_padding(y),
+        "crv": "P-256",
+    }
+
+    header = {
+        "typ": "dpop+jwt",
+        "alg": "ES256",
+        "jwk": jwk,
+    }
+
+    payload = {
+        "htm": "POST",
+        "htu": uri,
+        "iat": ts or int(time.time()),
+        "jti": uid or str(uuid.uuid4()),
+    }
+    header_b64 = _base64_url_encode_no_padding(
+        json.dumps(header, separators=(',', ':')).encode()
+    )
+    payload_b64 = _base64_url_encode_no_padding(
+        json.dumps(payload, separators=(',', ':')).encode()
+    )
+    signing_input = f"{header_b64}.{payload_b64}".encode()
+    signature = private_key.sign(sha256(signing_input).digest())
+    signature_bytes = EC.decode_der_signature_to_padded_pair(
+        signature, pad_to=32
+    )
+    signature_b64 = _base64_url_encode_no_padding(signature_bytes)
+
+    return f"{header_b64}.{payload_b64}.{signature_b64}"
+
+
+def _build_add_dpop_header_handler(private_key):
+    """Builds a before-call handler for calculating and setting the DPoP header."""
+
+    def _add_dpop_header_handler(**kwargs):
+        kwargs['params']['headers']['DPoP'] = _build_dpop_header(
+            private_key, kwargs['params']['url']
+        )
+
+    return _add_dpop_header_handler
+
+
+class LoginCredentialFetcher:
+    """
+    Converts login access tokens from the token cache to
+    credentials, and supports refreshing them.
+    """
+
+    _REFRESH_THRESHOLD = 5 * 60
+    _REQUIRED_TOKEN_FIELDS = (
+        'accessToken',
+        'refreshToken',
+        'dpopKey',
+        'clientId',
+    )
+
+    def __init__(
+        self,
+        session_name,
+        token_loader,
+        client_creator,
+        time_fetcher=_local_now,
+        feature_ids=None,
+    ):
+        self._session_name = session_name
+        self._token_loader = token_loader
+        self._client_creator = client_creator
+        self._time_fetcher = time_fetcher
+        if feature_ids is None:
+            feature_ids = set()
+        self.feature_ids = feature_ids
+
+    def load_cached_credentials(self):
+        """Loads cached credentials without checking their expiry."""
+        token = self._token_loader.load_token(self._session_name)
+
+        if token is None:
+            raise LoginTokenLoadError(
+                error_msg='Unable to load an existing login session for session '
+                f'{self._session_name}. Please reauthenticate with '
+                "'aws login'.",
+            )
+
+        missing_fields = [
+            key for key in self._REQUIRED_TOKEN_FIELDS if key not in token
+        ]
+        if missing_fields:
+            raise LoginTokenLoadError(
+                error_msg=f'Failed to load access token from token cache, missing required fields: {", ".join(missing_fields)}.'
+ ) + + return self._token_to_credentials(token) + + def refresh_credentials(self): + """Refreshes login credentials, including saving them to the cache.""" + if self.feature_ids: + register_feature_ids(self.feature_ids) + # Reload the token from disk, we need the refresh info + token = self._token_loader.load_token(self._session_name) + private_key = self._load_private_key(token) + + # Check if token has already been refreshed and is still valid + if ( + token + and 'accessToken' in token + and 'expiresAt' in token['accessToken'] + ): + expiry_time = _parse_if_needed(token['accessToken']['expiresAt']) + remaining_time = total_seconds(expiry_time - self._time_fetcher()) + if remaining_time > self._REFRESH_THRESHOLD: + return self._token_to_credentials(token) + + config = botocore.config.Config( + signature_version=botocore.UNSIGNED, + ) + client = self._client_creator( + 'signin', + config=config, + ) + + client.meta.events.register( + 'before-call.signin.CreateOAuth2Token', + _build_add_dpop_header_handler(private_key), + ) + + try: + response = client.create_o_auth2_token( + tokenInput={ + 'clientId': token['clientId'], + 'refreshToken': token['refreshToken'], + 'grantType': 'refresh_token', + }, + ) + except client.exceptions.AccessDeniedException as e: + error_type = e.response.get('error', '') + if error_type in ('TOKEN_EXPIRED', 'USER_CREDENTIALS_CHANGED'): + raise LoginRefreshRequired() from e + elif error_type == 'INSUFFICIENT_PERMISSIONS': + raise LoginInsufficientPermissions() from e + raise LoginError() from e + + if response is None or 'tokenOutput' not in response: + raise LoginTokenLoadError( + error_msg=( + "Unable to refresh access token due to an invalid service response. " + "Please try running 'aws login' again. If the issue persists, there " + "may be a temporary signin service problem." + ) + ) + + output = response.get('tokenOutput') + + expires_timestamp = self._time_fetcher().astimezone( + tzutc() + ) + datetime.timedelta(seconds=output['expiresIn']) + + # Overwrite token with refreshed fields + token.update( + { + 'accessToken': { + 'accessKeyId': output['accessToken']['accessKeyId'], + 'secretAccessKey': output['accessToken'][ + 'secretAccessKey' + ], + 'sessionToken': output['accessToken']['sessionToken'], + 'accountId': token['accessToken']['accountId'], + 'expiresAt': expires_timestamp.strftime( + '%Y-%m-%dT%H:%M:%SZ' + ), + }, + 'refreshToken': output['refreshToken'], + } + ) + self._token_loader.save_token(self._session_name, token) + + return self._token_to_credentials(token) + + @staticmethod + def _token_to_credentials(token): + return { + 'access_key': token['accessToken']['accessKeyId'], + 'secret_key': token['accessToken']['secretAccessKey'], + 'token': token['accessToken']['sessionToken'], + 'expiry_time': token['accessToken']['expiresAt'], + 'account_id': token['accessToken']['accountId'], + } + + @staticmethod + def _load_private_key(token): + if 'dpopKey' not in token: + raise LoginTokenLoadError( + error_msg='Private key not found in cached token.' 
+ ) + + # Remove the PEM header and footer lines + lines = token['dpopKey'].splitlines() + content_lines = [ + line + for line in lines + if not line.startswith('-----BEGIN') + and not line.startswith('-----END') + ] + + # strip should handle the optional newline at the end as well + contents = ''.join(content_lines).strip() + + try: + return EC.new_key_from_der_data(base64.b64decode(contents)) + except ValueError as e: + raise LoginTokenLoadError( + error_msg='Unable to load private key from cached token.' + ) from e + + +class LoginProvider(CredentialProvider): + METHOD = 'login' + + def __init__( + self, + load_config, + client_creator, + profile_name, + token_cache=None, + ): + super().__init__() + if token_cache is None: + token_cache = JSONFileCache(get_login_token_cache_directory()) + self._token_cache = token_cache + + self._load_config = load_config + self._client_creator = client_creator + self._profile_name = profile_name + self._feature_ids = {'CREDENTIALS_PROFILE_LOGIN', 'CREDENTIALS_LOGIN'} + + def load(self): + loaded_config = self._load_config() + profiles = loaded_config.get('profiles', {}) + profile_config = profiles.get(self._profile_name, {}) + + if 'login_session' not in profile_config: + return None + + if EC is None: + raise MissingDependencyException( + msg=( + "Using the login credential provider requires an " + "additional dependency. You will need to pip install " + "\"botocore[crt]\" before proceeding." + ) + ) + + fetcher = LoginCredentialFetcher( + session_name=profile_config['login_session'], + token_loader=LoginTokenLoader(self._token_cache), + client_creator=self._client_creator, + time_fetcher=_local_now, + feature_ids=self._feature_ids, + ) + + register_feature_ids(self._feature_ids) + + # Return the current cached credentials initially, + # regardless if they're expired + cached_credentials = fetcher.load_cached_credentials() + + return RefreshableCredentials( + access_key=cached_credentials['access_key'], + secret_key=cached_credentials['secret_key'], + token=cached_credentials['token'], + expiry_time=_parse_if_needed(cached_credentials['expiry_time']), + account_id=cached_credentials['account_id'], + method=self.METHOD, + refresh_using=fetcher.refresh_credentials, + time_fetcher=_local_now, + ) diff --git a/py311/lib/python3.11/site-packages/botocore/crt/__init__.py b/py311/lib/python3.11/site-packages/botocore/crt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..952ebf34cc37bde64e7fcd14a9b252a205429f47 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/crt/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +# A list of auth types supported by the signers in botocore/crt/auth.py. This +# should always match the keys of botocore.crt.auth.CRT_AUTH_TYPE_MAPS. The +# information is duplicated here so that it can be accessed in environments +# where `awscrt` is not present and any import from botocore.crt.auth would +# fail. 
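+#
+# A hypothetical usage sketch (not part of this module): callers can check
+# membership here before importing the CRT-backed signers, e.g.
+#
+#     if auth_type in CRT_SUPPORTED_AUTH_TYPES:
+#         from botocore.crt.auth import CRT_AUTH_TYPE_MAPS
+#         auth_class = CRT_AUTH_TYPE_MAPS[auth_type]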
+CRT_SUPPORTED_AUTH_TYPES = ( + 'v4', + 'v4-query', + 'v4a', + 's3v4', + 's3v4-query', + 's3v4a', + 's3v4a-query', +) diff --git a/py311/lib/python3.11/site-packages/botocore/crt/auth.py b/py311/lib/python3.11/site-packages/botocore/crt/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..e36730e07ee60c578ac731e7dd0db856bb4f44a2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/crt/auth.py @@ -0,0 +1,629 @@ +# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +from io import BytesIO + +from botocore.auth import ( + SIGNED_HEADERS_BLACKLIST, + STREAMING_UNSIGNED_PAYLOAD_TRAILER, + UNSIGNED_PAYLOAD, + BaseSigner, + _get_body_as_dict, + _host_from_url, +) +from botocore.compat import ( + HTTPHeaders, + awscrt, + get_current_datetime, + parse_qs, + urlsplit, + urlunsplit, +) +from botocore.exceptions import NoCredentialsError +from botocore.useragent import register_feature_id +from botocore.utils import percent_encode_sequence + + +class CrtSigV4Auth(BaseSigner): + REQUIRES_REGION = True + _PRESIGNED_HEADERS_BLOCKLIST = [ + 'Authorization', + 'X-Amz-Date', + 'X-Amz-Content-SHA256', + 'X-Amz-Security-Token', + ] + _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS + _USE_DOUBLE_URI_ENCODE = True + _SHOULD_NORMALIZE_URI_PATH = True + + def __init__(self, credentials, service_name, region_name): + self.credentials = credentials + self._service_name = service_name + self._region_name = region_name + self._expiration_in_seconds = None + + def _is_streaming_checksum_payload(self, request): + checksum_context = request.context.get('checksum', {}) + algorithm = checksum_context.get('request_algorithm') + return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer' + + def add_auth(self, request): + if self.credentials is None: + raise NoCredentialsError() + + datetime_now = get_current_datetime(remove_tzinfo=False) + + # Use existing 'X-Amz-Content-SHA256' header if able + existing_sha256 = self._get_existing_sha256(request) + + self._modify_request_before_signing(request) + + credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static( + access_key_id=self.credentials.access_key, + secret_access_key=self.credentials.secret_key, + session_token=self.credentials.token, + ) + + if self._is_streaming_checksum_payload(request): + explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER + elif self._should_sha256_sign_payload(request): + if existing_sha256: + explicit_payload = existing_sha256 + else: + explicit_payload = None # to be calculated during signing + else: + explicit_payload = UNSIGNED_PAYLOAD + + if self._should_add_content_sha256_header(explicit_payload): + body_header = ( + awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256 + ) + else: + body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE + + signing_config = awscrt.auth.AwsSigningConfig( + algorithm=awscrt.auth.AwsSigningAlgorithm.V4, + signature_type=self._SIGNATURE_TYPE, + credentials_provider=credentials_provider, + region=self._region_name, + 
service=self._service_name, + date=datetime_now, + should_sign_header=self._should_sign_header, + use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE, + should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH, + signed_body_value=explicit_payload, + signed_body_header_type=body_header, + expiration_in_seconds=self._expiration_in_seconds, + ) + crt_request = self._crt_request_from_aws_request(request) + future = awscrt.auth.aws_sign_request(crt_request, signing_config) + future.result() + self._apply_signing_changes(request, crt_request) + + def _crt_request_from_aws_request(self, aws_request): + url_parts = urlsplit(aws_request.url) + crt_path = url_parts.path if url_parts.path else '/' + if aws_request.params: + array = [] + for param, value in aws_request.params.items(): + value = str(value) + array.append(f'{param}={value}') + crt_path = crt_path + '?' + '&'.join(array) + elif url_parts.query: + crt_path = f'{crt_path}?{url_parts.query}' + + crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items()) + + # CRT requires body (if it exists) to be an I/O stream. + crt_body_stream = None + if aws_request.body: + if hasattr(aws_request.body, 'seek'): + crt_body_stream = aws_request.body + else: + crt_body_stream = BytesIO(aws_request.body) + + crt_request = awscrt.http.HttpRequest( + method=aws_request.method, + path=crt_path, + headers=crt_headers, + body_stream=crt_body_stream, + ) + return crt_request + + def _apply_signing_changes(self, aws_request, signed_crt_request): + # Apply changes from signed CRT request to the AWSRequest + aws_request.headers = HTTPHeaders.from_pairs( + list(signed_crt_request.headers) + ) + + def _should_sign_header(self, name, **kwargs): + return name.lower() not in SIGNED_HEADERS_BLACKLIST + + def _modify_request_before_signing(self, request): + # This could be a retry. Make sure the previous + # authorization headers are removed first. + for h in self._PRESIGNED_HEADERS_BLOCKLIST: + if h in request.headers: + del request.headers[h] + # If necessary, add the host header + if 'host' not in request.headers: + request.headers['host'] = _host_from_url(request.url) + + def _get_existing_sha256(self, request): + return request.headers.get('X-Amz-Content-SHA256') + + def _should_sha256_sign_payload(self, request): + # Payloads will always be signed over insecure connections. + if not request.url.startswith('https'): + return True + + # Certain operations may have payload signing disabled by default. + # Since we don't have access to the operation model, we pass in this + # bit of metadata through the request context. + return request.context.get('payload_signing_enabled', True) + + def _should_add_content_sha256_header(self, explicit_payload): + # only add X-Amz-Content-SHA256 header if payload is explicitly set + return explicit_payload is not None + + +class CrtS3SigV4Auth(CrtSigV4Auth): + # For S3, we do not normalize the path. + _USE_DOUBLE_URI_ENCODE = False + _SHOULD_NORMALIZE_URI_PATH = False + + def _get_existing_sha256(self, request): + # always recalculate + return None + + def _should_sha256_sign_payload(self, request): + # S3 allows optional body signing, so to minimize the performance + # impact, we opt to not SHA256 sign the body on streaming uploads, + # provided that we're on https. + client_config = request.context.get('client_config') + s3_config = getattr(client_config, 's3', None) + + # The config could be None if it isn't set, or if the customer sets it + # to None. 
+ if s3_config is None: + s3_config = {} + + # The explicit configuration takes precedence over any implicit + # configuration. + sign_payload = s3_config.get('payload_signing_enabled', None) + if sign_payload is not None: + return sign_payload + + # We require that both a checksum be present and https be enabled + # to implicitly disable body signing. The combination of TLS and + # a checksum is sufficiently secure and durable for us to be + # confident in the request without body signing. + checksum_header = 'Content-MD5' + checksum_context = request.context.get('checksum', {}) + algorithm = checksum_context.get('request_algorithm') + if isinstance(algorithm, dict) and algorithm.get('in') == 'header': + checksum_header = algorithm['name'] + if ( + not request.url.startswith('https') + or checksum_header not in request.headers + ): + return True + + # If the input is streaming we disable body signing by default. + if request.context.get('has_streaming_input', False): + return False + + # If the S3-specific checks had no results, delegate to the generic + # checks. + return super()._should_sha256_sign_payload(request) + + def _should_add_content_sha256_header(self, explicit_payload): + # Always add X-Amz-Content-SHA256 header + return True + + +class CrtSigV4AsymAuth(BaseSigner): + REQUIRES_REGION = True + _PRESIGNED_HEADERS_BLOCKLIST = [ + 'Authorization', + 'X-Amz-Date', + 'X-Amz-Content-SHA256', + 'X-Amz-Security-Token', + ] + _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS + _USE_DOUBLE_URI_ENCODE = True + _SHOULD_NORMALIZE_URI_PATH = True + + def __init__(self, credentials, service_name, region_name): + self.credentials = credentials + self._service_name = service_name + self._region_name = region_name + self._expiration_in_seconds = None + + def add_auth(self, request): + register_feature_id("SIGV4A_SIGNING") + if self.credentials is None: + raise NoCredentialsError() + + datetime_now = get_current_datetime(remove_tzinfo=False) + + # Use existing 'X-Amz-Content-SHA256' header if able + existing_sha256 = self._get_existing_sha256(request) + + self._modify_request_before_signing(request) + + credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static( + access_key_id=self.credentials.access_key, + secret_access_key=self.credentials.secret_key, + session_token=self.credentials.token, + ) + + if self._is_streaming_checksum_payload(request): + explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER + elif self._should_sha256_sign_payload(request): + if existing_sha256: + explicit_payload = existing_sha256 + else: + explicit_payload = None # to be calculated during signing + else: + explicit_payload = UNSIGNED_PAYLOAD + + if self._should_add_content_sha256_header(explicit_payload): + body_header = ( + awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256 + ) + else: + body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE + + signing_config = awscrt.auth.AwsSigningConfig( + algorithm=awscrt.auth.AwsSigningAlgorithm.V4_ASYMMETRIC, + signature_type=self._SIGNATURE_TYPE, + credentials_provider=credentials_provider, + region=self._region_name, + service=self._service_name, + date=datetime_now, + should_sign_header=self._should_sign_header, + use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE, + should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH, + signed_body_value=explicit_payload, + signed_body_header_type=body_header, + expiration_in_seconds=self._expiration_in_seconds, + ) + crt_request = self._crt_request_from_aws_request(request) + future = 
awscrt.auth.aws_sign_request(crt_request, signing_config) + future.result() + self._apply_signing_changes(request, crt_request) + + def _crt_request_from_aws_request(self, aws_request): + url_parts = urlsplit(aws_request.url) + crt_path = url_parts.path if url_parts.path else '/' + if aws_request.params: + array = [] + for param, value in aws_request.params.items(): + value = str(value) + array.append(f'{param}={value}') + crt_path = crt_path + '?' + '&'.join(array) + elif url_parts.query: + crt_path = f'{crt_path}?{url_parts.query}' + + crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items()) + + # CRT requires body (if it exists) to be an I/O stream. + crt_body_stream = None + if aws_request.body: + if hasattr(aws_request.body, 'seek'): + crt_body_stream = aws_request.body + else: + crt_body_stream = BytesIO(aws_request.body) + + crt_request = awscrt.http.HttpRequest( + method=aws_request.method, + path=crt_path, + headers=crt_headers, + body_stream=crt_body_stream, + ) + return crt_request + + def _apply_signing_changes(self, aws_request, signed_crt_request): + # Apply changes from signed CRT request to the AWSRequest + aws_request.headers = HTTPHeaders.from_pairs( + list(signed_crt_request.headers) + ) + + def _should_sign_header(self, name, **kwargs): + return name.lower() not in SIGNED_HEADERS_BLACKLIST + + def _modify_request_before_signing(self, request): + # This could be a retry. Make sure the previous + # authorization headers are removed first. + for h in self._PRESIGNED_HEADERS_BLOCKLIST: + if h in request.headers: + del request.headers[h] + # If necessary, add the host header + if 'host' not in request.headers: + request.headers['host'] = _host_from_url(request.url) + + def _get_existing_sha256(self, request): + return request.headers.get('X-Amz-Content-SHA256') + + def _is_streaming_checksum_payload(self, request): + checksum_context = request.context.get('checksum', {}) + algorithm = checksum_context.get('request_algorithm') + return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer' + + def _should_sha256_sign_payload(self, request): + # Payloads will always be signed over insecure connections. + if not request.url.startswith('https'): + return True + + # Certain operations may have payload signing disabled by default. + # Since we don't have access to the operation model, we pass in this + # bit of metadata through the request context. + return request.context.get('payload_signing_enabled', True) + + def _should_add_content_sha256_header(self, explicit_payload): + # only add X-Amz-Content-SHA256 header if payload is explicitly set + return explicit_payload is not None + + +class CrtS3SigV4AsymAuth(CrtSigV4AsymAuth): + # For S3, we do not normalize the path. + _USE_DOUBLE_URI_ENCODE = False + _SHOULD_NORMALIZE_URI_PATH = False + + def _get_existing_sha256(self, request): + # always recalculate + return None + + def _should_sha256_sign_payload(self, request): + # S3 allows optional body signing, so to minimize the performance + # impact, we opt to not SHA256 sign the body on streaming uploads, + # provided that we're on https. + client_config = request.context.get('client_config') + s3_config = getattr(client_config, 's3', None) + + # The config could be None if it isn't set, or if the customer sets it + # to None. + if s3_config is None: + s3_config = {} + + # The explicit configuration takes precedence over any implicit + # configuration. 
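+ # Note that, unlike CrtS3SigV4Auth above, which also honors a flexible + # checksum header resolved from the request context, this asymmetric + # variant only looks for Content-MD5 below.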
+ sign_payload = s3_config.get('payload_signing_enabled', None) + if sign_payload is not None: + return sign_payload + + # We require that both content-md5 be present and https be enabled + # to implicitly disable body signing. The combination of TLS and + # content-md5 is sufficiently secure and durable for us to be + # confident in the request without body signing. + if ( + not request.url.startswith('https') + or 'Content-MD5' not in request.headers + ): + return True + + # If the input is streaming we disable body signing by default. + if request.context.get('has_streaming_input', False): + return False + + # If the S3-specific checks had no results, delegate to the generic + # checks. + return super()._should_sha256_sign_payload(request) + + def _should_add_content_sha256_header(self, explicit_payload): + # Always add X-Amz-Content-SHA256 header + return True + + +class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth): + DEFAULT_EXPIRES = 3600 + _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS + + def __init__( + self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES + ): + super().__init__(credentials, service_name, region_name) + self._expiration_in_seconds = expires + + def _modify_request_before_signing(self, request): + super()._modify_request_before_signing(request) + + # We automatically set this header, so if it's the auto-set value we + # want to get rid of it since it doesn't make sense for presigned urls. + content_type = request.headers.get('content-type') + if content_type == 'application/x-www-form-urlencoded; charset=utf-8': + del request.headers['content-type'] + + # Now parse the original query string to a dict, inject our new query + # params, and serialize back to a query string. + url_parts = urlsplit(request.url) + # parse_qs makes each value a list, but in our case we know we won't + # have repeated keys so we know we have single element lists which we + # can convert back to scalar values. + query_string_parts = parse_qs(url_parts.query, keep_blank_values=True) + query_dict = {k: v[0] for k, v in query_string_parts.items()} + + # The spec is particular about this. It *has* to be: + # https://<endpoint>?<operation params>&<auth params> + # You can't mix the two types of params together, i.e. just keep doing + # new_query_params.update(op_params) + # new_query_params.update(auth_params) + # percent_encode_sequence(new_query_params) + if request.data: + # We also need to move the body params into the query string. To + # do this, we first have to convert it to a dict. + query_dict.update(_get_body_as_dict(request)) + request.data = '' + new_query_string = percent_encode_sequence(query_dict) + # url_parts is a tuple (and therefore immutable) so we need to create + # a new url_parts with the new query string. + # <part> - <index> + # scheme - 0 + # netloc - 1 + # path - 2 + # query - 3 <-- we're replacing this. + # fragment - 4 + p = url_parts + new_url_parts = (p[0], p[1], p[2], new_query_string, p[4]) + request.url = urlunsplit(new_url_parts) + + def _apply_signing_changes(self, aws_request, signed_crt_request): + # Apply changes from signed CRT request to the AWSRequest + super()._apply_signing_changes(aws_request, signed_crt_request) + + signed_query = urlsplit(signed_crt_request.path).query + p = urlsplit(aws_request.url) + # urlsplit() returns a tuple (and therefore immutable) so we + # need to create a new url with the new query string. + # <part> - <index> + # scheme - 0 + # netloc - 1 + # path - 2 + # query - 3 <-- we're replacing this.
+ # fragment - 4 + aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4])) + + +class CrtS3SigV4AsymQueryAuth(CrtSigV4AsymQueryAuth): + """S3 SigV4A auth using query parameters. + This signer will sign a request using query parameters and signature + version 4A, i.e. a "presigned url" signer. + Based off of: + http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html + """ + + # For S3, we do not normalize the path. + _USE_DOUBLE_URI_ENCODE = False + _SHOULD_NORMALIZE_URI_PATH = False + + def _should_sha256_sign_payload(self, request): + # From the doc link above: + # "You don't include a payload hash in the Canonical Request, because + # when you create a presigned URL, you don't know anything about the + # payload. Instead, you use a constant string "UNSIGNED-PAYLOAD". + return False + + def _should_add_content_sha256_header(self, explicit_payload): + # Never add X-Amz-Content-SHA256 header + return False + + +class CrtSigV4QueryAuth(CrtSigV4Auth): + DEFAULT_EXPIRES = 3600 + _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS + + def __init__( + self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES + ): + super().__init__(credentials, service_name, region_name) + self._expiration_in_seconds = expires + + def _modify_request_before_signing(self, request): + super()._modify_request_before_signing(request) + + # We automatically set this header, so if it's the auto-set value we + # want to get rid of it since it doesn't make sense for presigned urls. + content_type = request.headers.get('content-type') + if content_type == 'application/x-www-form-urlencoded; charset=utf-8': + del request.headers['content-type'] + + # Now parse the original query string to a dict, inject our new query + # params, and serialize back to a query string. + url_parts = urlsplit(request.url) + # parse_qs makes each value a list, but in our case we know we won't + # have repeated keys so we know we have single element lists which we + # can convert back to scalar values. + query_dict = { + k: v[0] + for k, v in parse_qs( + url_parts.query, keep_blank_values=True + ).items() + } + if request.params: + query_dict.update(request.params) + request.params = {} + # The spec is particular about this. It *has* to be: + # https://<endpoint>?<operation params>&<auth params> + # You can't mix the two types of params together, i.e. just keep doing + # new_query_params.update(op_params) + # new_query_params.update(auth_params) + # percent_encode_sequence(new_query_params) + if request.data: + # We also need to move the body params into the query string. To + # do this, we first have to convert it to a dict. + query_dict.update(_get_body_as_dict(request)) + request.data = '' + new_query_string = percent_encode_sequence(query_dict) + # url_parts is a tuple (and therefore immutable) so we need to create + # a new url_parts with the new query string. + # <part> - <index> + # scheme - 0 + # netloc - 1 + # path - 2 + # query - 3 <-- we're replacing this. + # fragment - 4 + p = url_parts + new_url_parts = (p[0], p[1], p[2], new_query_string, p[4]) + request.url = urlunsplit(new_url_parts) + + def _apply_signing_changes(self, aws_request, signed_crt_request): + # Apply changes from signed CRT request to the AWSRequest + super()._apply_signing_changes(aws_request, signed_crt_request) + + signed_query = urlsplit(signed_crt_request.path).query + p = urlsplit(aws_request.url) + # urlsplit() returns a tuple (and therefore immutable) so we + # need to create a new url with the new query string. + # <part> - <index> + # scheme - 0 + # netloc - 1 + # path - 2 + # query - 3 <-- we're replacing this.
+ # fragment - 4 + aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4])) + + +class CrtS3SigV4QueryAuth(CrtSigV4QueryAuth): + """S3 SigV4 auth using query parameters. + This signer will sign a request using query parameters and signature + version 4, i.e. a "presigned url" signer. + Based off of: + http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html + """ + + # For S3, we do not normalize the path. + _USE_DOUBLE_URI_ENCODE = False + _SHOULD_NORMALIZE_URI_PATH = False + + def _should_sha256_sign_payload(self, request): + # From the doc link above: + # "You don't include a payload hash in the Canonical Request, because + # when you create a presigned URL, you don't know anything about the + # payload. Instead, you use a constant string "UNSIGNED-PAYLOAD". + return False + + def _should_add_content_sha256_header(self, explicit_payload): + # Never add X-Amz-Content-SHA256 header + return False + + +# Defined at the bottom of module to ensure all Auth +# classes are defined. +CRT_AUTH_TYPE_MAPS = { + 'v4': CrtSigV4Auth, + 'v4-query': CrtSigV4QueryAuth, + 'v4a': CrtSigV4AsymAuth, + 's3v4': CrtS3SigV4Auth, + 's3v4-query': CrtS3SigV4QueryAuth, + 's3v4a': CrtS3SigV4AsymAuth, + 's3v4a-query': CrtS3SigV4AsymQueryAuth, +} diff --git a/py311/lib/python3.11/site-packages/botocore/discovery.py b/py311/lib/python3.11/site-packages/botocore/discovery.py new file mode 100644 index 0000000000000000000000000000000000000000..3d16e97dd1f66e1db5c871adf990310676210f86 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/discovery.py @@ -0,0 +1,282 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import logging +import time +import weakref + +from botocore import xform_name +from botocore.exceptions import BotoCoreError, ConnectionError, HTTPClientError +from botocore.model import OperationNotFoundError +from botocore.utils import CachedProperty + +logger = logging.getLogger(__name__) + + +class EndpointDiscoveryException(BotoCoreError): + pass + + +class EndpointDiscoveryRequired(EndpointDiscoveryException): + """Endpoint Discovery is disabled but is required for this operation.""" + + fmt = 'Endpoint Discovery is not enabled but this operation requires it.' + + +class EndpointDiscoveryRefreshFailed(EndpointDiscoveryException): + """Endpoint Discovery failed to refresh the known endpoints.""" + + fmt = 'Endpoint Discovery failed to refresh the required endpoints.'
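+ +# A minimal usage sketch (hedged; assumes botocore's standard Config API): +# endpoint discovery is typically toggled per client, for example +#     config = botocore.config.Config(endpoint_discovery_enabled='true') +# EndpointDiscoveryRequired then surfaces when a required operation runs +# with discovery disabled, and EndpointDiscoveryRefreshFailed when a +# required refresh keeps failing.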
+ + +def block_endpoint_discovery_required_operations(model, **kwargs): + endpoint_discovery = model.endpoint_discovery + if endpoint_discovery and endpoint_discovery.get('required'): + raise EndpointDiscoveryRequired() + + +class EndpointDiscoveryModel: + def __init__(self, service_model): + self._service_model = service_model + + @CachedProperty + def discovery_operation_name(self): + discovery_operation = self._service_model.endpoint_discovery_operation + return xform_name(discovery_operation.name) + + @CachedProperty + def discovery_operation_keys(self): + discovery_operation = self._service_model.endpoint_discovery_operation + keys = [] + if discovery_operation.input_shape: + keys = list(discovery_operation.input_shape.members.keys()) + return keys + + def discovery_required_for(self, operation_name): + try: + operation_model = self._service_model.operation_model( + operation_name + ) + return operation_model.endpoint_discovery.get('required', False) + except OperationNotFoundError: + return False + + def discovery_operation_kwargs(self, **kwargs): + input_keys = self.discovery_operation_keys + # Operation and Identifiers are only sent if there are Identifiers + if not kwargs.get('Identifiers'): + kwargs.pop('Operation', None) + kwargs.pop('Identifiers', None) + return {k: v for k, v in kwargs.items() if k in input_keys} + + def gather_identifiers(self, operation, params): + return self._gather_ids(operation.input_shape, params) + + def _gather_ids(self, shape, params, ids=None): + # Traverse the input shape and corresponding parameters, gathering + # any input fields labeled as an endpoint discovery id + if ids is None: + ids = {} + for member_name, member_shape in shape.members.items(): + if member_shape.metadata.get('endpointdiscoveryid'): + ids[member_name] = params[member_name] + elif ( + member_shape.type_name == 'structure' and member_name in params + ): + self._gather_ids(member_shape, params[member_name], ids) + return ids + + +class EndpointDiscoveryManager: + def __init__( + self, client, cache=None, current_time=None, always_discover=True + ): + if cache is None: + cache = {} + self._cache = cache + self._failed_attempts = {} + if current_time is None: + current_time = time.time + self._time = current_time + self._always_discover = always_discover + + # This needs to be a weak ref in order to prevent memory leaks on + # python 2.6 + self._client = weakref.proxy(client) + self._model = EndpointDiscoveryModel(client.meta.service_model) + + def _parse_endpoints(self, response): + endpoints = response['Endpoints'] + current_time = self._time() + for endpoint in endpoints: + cache_time = endpoint.get('CachePeriodInMinutes') + endpoint['Expiration'] = current_time + cache_time * 60 + return endpoints + + def _cache_item(self, value): + if isinstance(value, dict): + return tuple(sorted(value.items())) + else: + return value + + def _create_cache_key(self, **kwargs): + kwargs = self._model.discovery_operation_kwargs(**kwargs) + return tuple(self._cache_item(v) for k, v in sorted(kwargs.items())) + + def gather_identifiers(self, operation, params): + return self._model.gather_identifiers(operation, params) + + def delete_endpoints(self, **kwargs): + cache_key = self._create_cache_key(**kwargs) + if cache_key in self._cache: + del self._cache[cache_key] + + def _describe_endpoints(self, **kwargs): + # This is effectively a proxy to whatever name/kwargs the service + # supports for endpoint discovery. 
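+ # For a service like DynamoDB, for example, this resolves to the + # client's describe_endpoints method.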
+ kwargs = self._model.discovery_operation_kwargs(**kwargs) + operation_name = self._model.discovery_operation_name + discovery_operation = getattr(self._client, operation_name) + logger.debug('Discovering endpoints with kwargs: %s', kwargs) + return discovery_operation(**kwargs) + + def _get_current_endpoints(self, key): + if key not in self._cache: + return None + now = self._time() + return [e for e in self._cache[key] if now < e['Expiration']] + + def _refresh_current_endpoints(self, **kwargs): + cache_key = self._create_cache_key(**kwargs) + try: + response = self._describe_endpoints(**kwargs) + endpoints = self._parse_endpoints(response) + self._cache[cache_key] = endpoints + self._failed_attempts.pop(cache_key, None) + return endpoints + except (ConnectionError, HTTPClientError): + self._failed_attempts[cache_key] = self._time() + 60 + return None + + def _recently_failed(self, cache_key): + if cache_key in self._failed_attempts: + now = self._time() + if now < self._failed_attempts[cache_key]: + return True + del self._failed_attempts[cache_key] + return False + + def _select_endpoint(self, endpoints): + return endpoints[0]['Address'] + + def describe_endpoint(self, **kwargs): + operation = kwargs['Operation'] + discovery_required = self._model.discovery_required_for(operation) + + if not self._always_discover and not discovery_required: + # Discovery set to only run on required operations + logger.debug( + 'Optional discovery disabled. Skipping discovery for Operation: %s', + operation, + ) + return None + + # Get the endpoint for the provided operation and identifiers + cache_key = self._create_cache_key(**kwargs) + endpoints = self._get_current_endpoints(cache_key) + if endpoints: + return self._select_endpoint(endpoints) + # All known endpoints are stale + recently_failed = self._recently_failed(cache_key) + if not recently_failed: + # We haven't failed to discover recently, go ahead and refresh + endpoints = self._refresh_current_endpoints(**kwargs) + if endpoints: + return self._select_endpoint(endpoints) + # Discovery has failed recently, do our best to get an endpoint + logger.debug('Endpoint Discovery has failed for: %s', kwargs) + stale_entries = self._cache.get(cache_key, None) + if stale_entries: + # We have stale entries, use those while discovery is failing + return self._select_endpoint(stale_entries) + if discovery_required: + # It looks strange to be checking recently_failed again but, + # this informs us as to whether or not we tried to refresh earlier + if recently_failed: + # Discovery is required and we haven't already refreshed + endpoints = self._refresh_current_endpoints(**kwargs) + if endpoints: + return self._select_endpoint(endpoints) + # No endpoints even refresh, raise hard error + raise EndpointDiscoveryRefreshFailed() + # Discovery is optional, just use the default endpoint for now + return None + + +class EndpointDiscoveryHandler: + def __init__(self, manager): + self._manager = manager + + def register(self, events, service_id): + events.register( + f'before-parameter-build.{service_id}', self.gather_identifiers + ) + events.register_first( + f'request-created.{service_id}', self.discover_endpoint + ) + events.register(f'needs-retry.{service_id}', self.handle_retries) + + def gather_identifiers(self, params, model, context, **kwargs): + endpoint_discovery = model.endpoint_discovery + # Only continue if the operation supports endpoint discovery + if endpoint_discovery is None: + return + ids = self._manager.gather_identifiers(model, params) + 
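+ # Stash the gathered ids on the request context so that discover_endpoint + # (below) can pick them up once the request object has been created.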
context['discovery'] = {'identifiers': ids} + + def discover_endpoint(self, request, operation_name, **kwargs): + ids = request.context.get('discovery', {}).get('identifiers') + if ids is None: + return + endpoint = self._manager.describe_endpoint( + Operation=operation_name, Identifiers=ids + ) + if endpoint is None: + logger.debug('Failed to discover and inject endpoint') + return + if not endpoint.startswith('http'): + endpoint = 'https://' + endpoint + logger.debug('Injecting discovered endpoint: %s', endpoint) + request.url = endpoint + + def handle_retries(self, request_dict, response, operation, **kwargs): + if response is None: + return None + + _, response = response + status = response.get('ResponseMetadata', {}).get('HTTPStatusCode') + error_code = response.get('Error', {}).get('Code') + if status != 421 and error_code != 'InvalidEndpointException': + return None + + context = request_dict.get('context', {}) + ids = context.get('discovery', {}).get('identifiers') + if ids is None: + return None + + # Delete the cached endpoints, forcing a refresh on retry + # TODO: Improve eviction behavior to only evict the bad endpoint if + # there are multiple. This will almost certainly require a lock. + self._manager.delete_endpoints( + Operation=operation.name, Identifiers=ids + ) + return 0 diff --git a/py311/lib/python3.11/site-packages/botocore/docs/__init__.py b/py311/lib/python3.11/site-packages/botocore/docs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..844f5de59b05a334d9142fdbf087d6870e94970d --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/__init__.py @@ -0,0 +1,54 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import os + +from botocore.docs.service import ServiceDocumenter + +DEPRECATED_SERVICE_NAMES = {'sms-voice'} + + +def generate_docs(root_dir, session): + """Generates the reference documentation for botocore + + This will go through every available AWS service and output reStructuredText + files documenting each service. + + :param root_dir: The directory to write the reference files to. Each + service's reference documentation is located at + root_dir/reference/services/service-name.rst + """ + # Create the root directory where all service docs live. + services_dir_path = os.path.join(root_dir, 'reference', 'services') + if not os.path.exists(services_dir_path): + os.makedirs(services_dir_path) + + # Prevents deprecated service names from being generated in docs. + available_services = [ + service + for service in session.get_available_services() + if service not in DEPRECATED_SERVICE_NAMES + ] + + # Generate reference docs and write them out. + for service_name in available_services: + docs = ServiceDocumenter( + service_name, session, services_dir_path + ).document_service() + + # Write the main service documentation page.
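+ # document_service() evidently returns the rendered page as bytes, + # which is why the file below is opened in binary ('wb') mode.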
+ # Path: <root>/reference/services/<service>/index.rst + service_file_path = os.path.join( + services_dir_path, f'{service_name}.rst' + ) + with open(service_file_path, 'wb') as f: + f.write(docs) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/client.py b/py311/lib/python3.11/site-packages/botocore/docs/client.py new file mode 100644 index 0000000000000000000000000000000000000000..41e37426ec8f8d713f52a8678763f6f77b61dac1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/client.py @@ -0,0 +1,453 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import os + +from botocore import xform_name +from botocore.compat import OrderedDict +from botocore.docs.bcdoc.restdoc import DocumentStructure +from botocore.docs.example import ResponseExampleDocumenter +from botocore.docs.method import ( + document_custom_method, + document_model_driven_method, + get_instance_public_methods, +) +from botocore.docs.params import ResponseParamsDocumenter +from botocore.docs.sharedexample import document_shared_examples +from botocore.docs.utils import DocumentedShape, get_official_service_name + + +def _allowlist_generate_presigned_url(method_name, service_name, **kwargs): + if method_name != 'generate_presigned_url': + return None + return service_name in ['s3'] + + +class ClientDocumenter: + _CLIENT_METHODS_FILTERS = [ + _allowlist_generate_presigned_url, + ] + + def __init__(self, client, root_docs_path, shared_examples=None): + self._client = client + self._client_class_name = self._client.__class__.__name__ + self._root_docs_path = root_docs_path + self._shared_examples = shared_examples + if self._shared_examples is None: + self._shared_examples = {} + self._service_name = self._client.meta.service_model.service_name + + def document_client(self, section): + """Documents a client and its methods + + :param section: The section to write to.
+ """ + self._add_title(section) + self._add_class_signature(section) + client_methods = self._get_client_methods() + self._add_client_intro(section, client_methods) + self._add_client_methods(client_methods) + + def _get_client_methods(self): + client_methods = get_instance_public_methods(self._client) + return self._filter_client_methods(client_methods) + + def _filter_client_methods(self, client_methods): + filtered_methods = {} + for method_name, method in client_methods.items(): + include = self._filter_client_method( + method=method, + method_name=method_name, + service_name=self._service_name, + ) + if include: + filtered_methods[method_name] = method + return filtered_methods + + def _filter_client_method(self, **kwargs): + # Apply each filter to the method + for filter in self._CLIENT_METHODS_FILTERS: + filter_include = filter(**kwargs) + # Use the first non-None value returned by any of the filters + if filter_include is not None: + return filter_include + # Otherwise default to including it + return True + + def _add_title(self, section): + section.style.h2('Client') + + def _add_client_intro(self, section, client_methods): + section = section.add_new_section('intro') + # Write out the top level description for the client. + official_service_name = get_official_service_name( + self._client.meta.service_model + ) + section.write( + f"A low-level client representing {official_service_name}" + ) + section.style.new_line() + section.include_doc_string( + self._client.meta.service_model.documentation + ) + + # Write out the client example instantiation. + self._add_client_creation_example(section) + + # List out all of the possible client methods. + section.style.dedent() + section.style.new_paragraph() + section.writeln('These are the available methods:') + section.style.toctree() + for method_name in sorted(client_methods): + section.style.tocitem(f'{self._service_name}/client/{method_name}') + + def _add_class_signature(self, section): + section.style.start_sphinx_py_class( + class_name=f'{self._client_class_name}.Client' + ) + + def _add_client_creation_example(self, section): + section.style.start_codeblock() + section.style.new_line() + section.write( + f'client = session.create_client(\'{self._service_name}\')' + ) + section.style.end_codeblock() + + def _add_client_methods(self, client_methods): + for method_name in sorted(client_methods): + # Create a new DocumentStructure for each client method and add contents. + method_doc_structure = DocumentStructure( + method_name, target='html' + ) + self._add_client_method( + method_doc_structure, method_name, client_methods[method_name] + ) + # Write client methods in individual/nested files. 
+ # Path: <root>/reference/services/<service>/client/<method_name>.rst + client_dir_path = os.path.join( + self._root_docs_path, self._service_name, 'client' + ) + method_doc_structure.write_to_file(client_dir_path, method_name) + + def _add_client_method(self, section, method_name, method): + breadcrumb_section = section.add_new_section('breadcrumb') + breadcrumb_section.style.ref( + self._client_class_name, f'../../{self._service_name}' + ) + breadcrumb_section.write(f' / Client / {method_name}') + section.add_title_section(method_name) + method_section = section.add_new_section( + method_name, + context={'qualifier': f'{self._client_class_name}.Client.'}, + ) + if self._is_custom_method(method_name): + self._add_custom_method( + method_section, + method_name, + method, + ) + else: + self._add_model_driven_method(method_section, method_name) + + def _is_custom_method(self, method_name): + return method_name not in self._client.meta.method_to_api_mapping + + def _add_custom_method(self, section, method_name, method): + document_custom_method(section, method_name, method) + + def _add_method_exceptions_list(self, section, operation_model): + error_section = section.add_new_section('exceptions') + error_section.style.new_line() + error_section.style.bold('Exceptions') + error_section.style.new_line() + for error in operation_model.error_shapes: + class_name = ( + f'{self._client_class_name}.Client.exceptions.{error.name}' + ) + error_section.style.li(f':py:class:`{class_name}`') + + def _add_model_driven_method(self, section, method_name): + service_model = self._client.meta.service_model + operation_name = self._client.meta.method_to_api_mapping[method_name] + operation_model = service_model.operation_model(operation_name) + + example_prefix = f'response = client.{method_name}' + full_method_name = ( + f"{section.context.get('qualifier', '')}{method_name}" + ) + document_model_driven_method( + section, + full_method_name, + operation_model, + event_emitter=self._client.meta.events, + method_description=operation_model.documentation, + example_prefix=example_prefix, + ) + + # Add any modeled exceptions + if operation_model.error_shapes: + self._add_method_exceptions_list(section, operation_model) + + # Add the shared examples + shared_examples = self._shared_examples.get(operation_name) + if shared_examples: + document_shared_examples( + section, operation_model, example_prefix, shared_examples + ) + + +class ClientExceptionsDocumenter: + _USER_GUIDE_LINK = ( + 'https://boto3.amazonaws.com/' + 'v1/documentation/api/latest/guide/error-handling.html' + ) + _GENERIC_ERROR_SHAPE = DocumentedShape( + name='Error', + type_name='structure', + documentation=('Normalized access to common exception attributes.'), + members=OrderedDict( + [ + ( + 'Code', + DocumentedShape( + name='Code', + type_name='string', + documentation=( + 'An identifier specifying the exception type.' + ), + ), + ), + ( + 'Message', + DocumentedShape( + name='Message', + type_name='string', + documentation=( + 'A descriptive message explaining why the exception ' + 'occurred.'
+ ), + ), + ), + ] + ), + ) + + def __init__(self, client, root_docs_path): + self._client = client + self._client_class_name = self._client.__class__.__name__ + self._service_name = self._client.meta.service_model.service_name + self._root_docs_path = root_docs_path + + def document_exceptions(self, section): + self._add_title(section) + self._add_overview(section) + self._add_exceptions_list(section) + self._add_exception_classes() + + def _add_title(self, section): + section.style.h2('Client Exceptions') + + def _add_overview(self, section): + section.style.new_line() + section.write( + 'Client exceptions are available on a client instance ' + 'via the ``exceptions`` property. For more detailed instructions ' + 'and examples on the exact usage of client exceptions, see the ' + 'error handling ' + ) + section.style.external_link( + title='user guide', + link=self._USER_GUIDE_LINK, + ) + section.write('.') + section.style.new_line() + + def _exception_class_name(self, shape): + return f'{self._client_class_name}.Client.exceptions.{shape.name}' + + def _add_exceptions_list(self, section): + error_shapes = self._client.meta.service_model.error_shapes + if not error_shapes: + section.style.new_line() + section.write('This client has no modeled exception classes.') + section.style.new_line() + return + section.style.new_line() + section.writeln('The available client exceptions are:') + section.style.toctree() + for shape in error_shapes: + section.style.tocitem( + f'{self._service_name}/client/exceptions/{shape.name}' + ) + + def _add_exception_classes(self): + for shape in self._client.meta.service_model.error_shapes: + # Create a new DocumentStructure for each exception method and add contents. + exception_doc_structure = DocumentStructure( + shape.name, target='html' + ) + self._add_exception_class(exception_doc_structure, shape) + # Write exceptions in individual/nested files. 
+ # Path: <root>/reference/services/<service>/client/exceptions/<exception_name>.rst + exception_dir_path = os.path.join( + self._root_docs_path, + self._service_name, + 'client', + 'exceptions', + ) + exception_doc_structure.write_to_file( + exception_dir_path, shape.name + ) + + def _add_exception_class(self, section, shape): + breadcrumb_section = section.add_new_section('breadcrumb') + breadcrumb_section.style.ref( + self._client_class_name, f'../../../{self._service_name}' + ) + breadcrumb_section.write(f' / Client / exceptions / {shape.name}') + section.add_title_section(shape.name) + class_section = section.add_new_section(shape.name) + class_name = self._exception_class_name(shape) + class_section.style.start_sphinx_py_class(class_name=class_name) + self._add_top_level_documentation(class_section, shape) + self._add_exception_catch_example(class_section, shape) + self._add_response_attr(class_section, shape) + class_section.style.end_sphinx_py_class() + + def _add_top_level_documentation(self, section, shape): + if shape.documentation: + section.style.new_line() + section.include_doc_string(shape.documentation) + section.style.new_line() + + def _add_exception_catch_example(self, section, shape): + section.style.new_line() + section.style.bold('Example') + section.style.new_paragraph() + section.style.start_codeblock() + section.write('try:') + section.style.indent() + section.style.new_line() + section.write('...') + section.style.dedent() + section.style.new_line() + section.write(f'except client.exceptions.{shape.name} as e:') + section.style.indent() + section.style.new_line() + section.write('print(e.response)') + section.style.dedent() + section.style.end_codeblock() + + def _add_response_attr(self, section, shape): + response_section = section.add_new_section('response') + response_section.style.start_sphinx_py_attr('response') + self._add_response_attr_description(response_section) + self._add_response_example(response_section, shape) + self._add_response_params(response_section, shape) + response_section.style.end_sphinx_py_attr() + + def _add_response_attr_description(self, section): + section.style.new_line() + section.include_doc_string( + 'The parsed error response. All exceptions have a top level ' + '``Error`` key that provides normalized access to common ' + 'exception attributes. All other keys are specific to this ' + 'service or exception class.'
+ ) + section.style.new_line() + + def _add_response_example(self, section, shape): + example_section = section.add_new_section('syntax') + example_section.style.new_line() + example_section.style.bold('Syntax') + example_section.style.new_paragraph() + documenter = ResponseExampleDocumenter( + service_name=self._service_name, + operation_name=None, + event_emitter=self._client.meta.events, + ) + documenter.document_example( + example_section, + shape, + include=[self._GENERIC_ERROR_SHAPE], + ) + + def _add_response_params(self, section, shape): + params_section = section.add_new_section('Structure') + params_section.style.new_line() + params_section.style.bold('Structure') + params_section.style.new_paragraph() + documenter = ResponseParamsDocumenter( + service_name=self._service_name, + operation_name=None, + event_emitter=self._client.meta.events, + ) + documenter.document_params( + params_section, + shape, + include=[self._GENERIC_ERROR_SHAPE], + ) + + +class ClientContextParamsDocumenter: + _CONFIG_GUIDE_LINK = ( + 'https://boto3.amazonaws.com/' + 'v1/documentation/api/latest/guide/configuration.html' + ) + + OMITTED_CONTEXT_PARAMS = { + 's3': ( + 'Accelerate', + 'DisableMultiRegionAccessPoints', + 'ForcePathStyle', + 'UseArnRegion', + ), + 's3control': ('UseArnRegion',), + } + + def __init__(self, service_name, context_params): + self._service_name = service_name + self._context_params = context_params + + def document_context_params(self, section): + self._add_title(section) + self._add_overview(section) + self._add_context_params_list(section) + + def _add_title(self, section): + section.style.h2('Client Context Parameters') + + def _add_overview(self, section): + section.style.new_line() + section.write( + 'Client context parameters are configurable on a client ' + 'instance via the ``client_context_params`` parameter in the ' + '``Config`` object. For more detailed instructions and examples ' + 'on the exact usage of context params see the ' + ) + section.style.external_link( + title='configuration guide', + link=self._CONFIG_GUIDE_LINK, + ) + section.write('.') + section.style.new_line() + + def _add_context_params_list(self, section): + section.style.new_line() + sn = f'``{self._service_name}``' + section.writeln(f'The available {sn} client context params are:') + for param in self._context_params: + section.style.new_line() + name = f'``{xform_name(param.name)}``' + section.write(f'* {name} ({param.type}) - {param.documentation}') diff --git a/py311/lib/python3.11/site-packages/botocore/docs/docstring.py b/py311/lib/python3.11/site-packages/botocore/docs/docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..93b2e6b23cc5b295fe23a151ae3a0ffe797f0db6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/docstring.py @@ -0,0 +1,97 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
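+ +# Usage sketch (hedged; mirrors how botocore's client machinery consumes +# these classes): an instance is assigned directly to a function's __doc__, +# e.g. _api_call.__doc__ = ClientMethodDocstring(...), so the relatively +# expensive reST rendering is deferred until help() first inspects the +# docstring.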
+from botocore.docs.bcdoc.restdoc import DocumentStructure +from botocore.docs.method import document_model_driven_method +from botocore.docs.paginator import document_paginate_method +from botocore.docs.waiter import document_wait_method + + +class LazyLoadedDocstring(str): + """Used for lazily loading docstrings + + You can instantiate this class and assign it to a __doc__ value. + The docstring will not be generated till accessed via __doc__ or + help(). Note that all docstring classes **must** subclass from + this class. It cannot be used directly as a docstring. + """ + + def __init__(self, *args, **kwargs): + """ + The args and kwargs are the same as the underlying document + generation function. These just get proxied to the underlying + function. + """ + super().__init__() + self._gen_args = args + self._gen_kwargs = kwargs + self._docstring = None + + def __new__(cls, *args, **kwargs): + # Needed in order to sub class from str with args and kwargs + return super().__new__(cls) + + def _write_docstring(self, *args, **kwargs): + raise NotImplementedError( + '_write_docstring is not implemented. Please subclass from ' + 'this class and provide your own _write_docstring method' + ) + + def expandtabs(self, tabsize=8): + """Expands tabs to spaces + + So this is a big hack in order to get lazy loaded docstring work + for the ``help()``. In the ``help()`` function, ``pydoc`` and + ``inspect`` are used. At some point the ``inspect.cleandoc`` + method is called. To clean the docs ``expandtabs`` is called + and that is where we override the method to generate and return the + docstrings. + """ + if self._docstring is None: + self._generate() + return self._docstring.expandtabs(tabsize) + + def __str__(self): + return self._generate() + + # __doc__ of target will use either __repr__ or __str__ of this class. + __repr__ = __str__ + + def _generate(self): + # Generate the docstring if it is not already cached. + if self._docstring is None: + self._docstring = self._create_docstring() + return self._docstring + + def _create_docstring(self): + docstring_structure = DocumentStructure('docstring', target='html') + # Call the document method function with the args and kwargs + # passed to the class. + self._write_docstring( + docstring_structure, *self._gen_args, **self._gen_kwargs + ) + return docstring_structure.flush_structure().decode('utf-8') + + +class ClientMethodDocstring(LazyLoadedDocstring): + def _write_docstring(self, *args, **kwargs): + document_model_driven_method(*args, **kwargs) + + +class WaiterDocstring(LazyLoadedDocstring): + def _write_docstring(self, *args, **kwargs): + document_wait_method(*args, **kwargs) + + +class PaginatorDocstring(LazyLoadedDocstring): + def _write_docstring(self, *args, **kwargs): + document_paginate_method(*args, **kwargs) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/example.py b/py311/lib/python3.11/site-packages/botocore/docs/example.py new file mode 100644 index 0000000000000000000000000000000000000000..cb43db550962030728a69d3b47ae50d40c15fa35 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/example.py @@ -0,0 +1,236 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from botocore.docs.shape import ShapeDocumenter +from botocore.docs.utils import py_default + + +class BaseExampleDocumenter(ShapeDocumenter): + def document_example( + self, section, shape, prefix=None, include=None, exclude=None + ): + """Generates an example based on a shape + + :param section: The section to write the documentation to. + + :param shape: The shape of the operation. + + :param prefix: Anything to be included before the example + + :type include: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include: The parameter shapes to include in the documentation. + + :type exclude: List of the names of the parameters to exclude. + :param exclude: The names of the parameters to exclude from + documentation. + """ + history = [] + section.style.new_line() + section.style.start_codeblock() + if prefix is not None: + section.write(prefix) + self.traverse_and_document_shape( + section=section, + shape=shape, + history=history, + include=include, + exclude=exclude, + ) + final_blank_line_section = section.add_new_section('final-blank-line') + final_blank_line_section.style.new_line() + + def document_recursive_shape(self, section, shape, **kwargs): + section.write('{\'... recursive ...\'}') + + def document_shape_default( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + py_type = self._get_special_py_default(shape) + if py_type is None: + py_type = py_default(shape.type_name) + + if self._context.get('streaming_shape') == shape: + py_type = 'StreamingBody()' + section.write(py_type) + + def document_shape_type_string( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + if 'enum' in shape.metadata: + for i, enum in enumerate(shape.metadata['enum']): + section.write(f'\'{enum}\'') + if i < len(shape.metadata['enum']) - 1: + section.write('|') + else: + self.document_shape_default(section, shape, history) + + def document_shape_type_list( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + param_shape = shape.member + list_section = section.add_new_section('list-value') + self._start_nested_param(list_section, '[') + param_section = list_section.add_new_section( + 'member', context={'shape': param_shape.name} + ) + self.traverse_and_document_shape( + section=param_section, shape=param_shape, history=history + ) + ending_comma_section = list_section.add_new_section('ending-comma') + ending_comma_section.write(',') + ending_bracket_section = list_section.add_new_section('ending-bracket') + self._end_nested_param(ending_bracket_section, ']') + + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + if not shape.members: + section.write('{}') + return + + section = section.add_new_section('structure-value') + self._start_nested_param(section, '{') + + input_members = self._add_members_to_shape(shape.members, include) + + for i, param in enumerate(input_members): + if exclude and param in exclude: + continue + param_section = section.add_new_section(param) + param_section.write(f'\'{param}\': ') + param_shape = input_members[param] + param_value_section = param_section.add_new_section( + 'member-value', context={'shape': param_shape.name} + ) + self.traverse_and_document_shape( + 
section=param_value_section, + shape=param_shape, + history=history, + name=param, + ) + if i < len(input_members) - 1: + ending_comma_section = param_section.add_new_section( + 'ending-comma' + ) + ending_comma_section.write(',') + ending_comma_section.style.new_line() + self._end_structure(section, '{', '}') + + def document_shape_type_map( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + map_section = section.add_new_section('map-value') + self._start_nested_param(map_section, '{') + value_shape = shape.value + key_section = map_section.add_new_section( + 'key', context={'shape': shape.key.name} + ) + key_section.write('\'string\': ') + value_section = map_section.add_new_section( + 'value', context={'shape': value_shape.name} + ) + self.traverse_and_document_shape( + section=value_section, shape=value_shape, history=history + ) + end_bracket_section = map_section.add_new_section('ending-bracket') + self._end_nested_param(end_bracket_section, '}') + + def _add_members_to_shape(self, members, include): + if include: + members = members.copy() + for param in include: + members[param.name] = param + return members + + def _start_nested_param(self, section, start=None): + if start is not None: + section.write(start) + section.style.indent() + section.style.indent() + section.style.new_line() + + def _end_nested_param(self, section, end=None): + section.style.dedent() + section.style.dedent() + section.style.new_line() + if end is not None: + section.write(end) + + def _end_structure(self, section, start, end): + # If there are no members in the structure, then make sure the + # start and the end bracket are on the same line, by removing all + # previous text and writing the start and end. + if not section.available_sections: + section.clear_text() + section.write(start + end) + self._end_nested_param(section) + else: + end_bracket_section = section.add_new_section('ending-bracket') + self._end_nested_param(end_bracket_section, end) + + +class ResponseExampleDocumenter(BaseExampleDocumenter): + EVENT_NAME = 'response-example' + + def document_shape_type_event_stream( + self, section, shape, history, **kwargs + ): + section.write('EventStream(') + self.document_shape_type_structure(section, shape, history, **kwargs) + end_section = section.add_new_section('event-stream-end') + end_section.write(')') + + +class RequestExampleDocumenter(BaseExampleDocumenter): + EVENT_NAME = 'request-example' + + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + param_format = '\'%s\'' + operator = ': ' + start = '{' + end = '}' + + if len(history) <= 1: + operator = '=' + start = '(' + end = ')' + param_format = '%s' + section = section.add_new_section('structure-value') + self._start_nested_param(section, start) + input_members = self._add_members_to_shape(shape.members, include) + + for i, param in enumerate(input_members): + if exclude and param in exclude: + continue + param_section = section.add_new_section(param) + param_section.write(param_format % param) + param_section.write(operator) + param_shape = input_members[param] + param_value_section = param_section.add_new_section( + 'member-value', context={'shape': param_shape.name} + ) + self.traverse_and_document_shape( + section=param_value_section, + shape=param_shape, + history=history, + name=param, + ) + if i < len(input_members) - 1: + ending_comma_section = param_section.add_new_section( + 'ending-comma' + ) + ending_comma_section.write(',') +
ending_comma_section.style.new_line() + self._end_structure(section, start, end) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/method.py b/py311/lib/python3.11/site-packages/botocore/docs/method.py new file mode 100644 index 0000000000000000000000000000000000000000..5db906c8ddd5c1278620bcf7479d78b6cf6ea903 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/method.py @@ -0,0 +1,328 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import inspect +import types + +from botocore.docs.example import ( + RequestExampleDocumenter, + ResponseExampleDocumenter, +) +from botocore.docs.params import ( + RequestParamsDocumenter, + ResponseParamsDocumenter, +) + +AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI' + + +def get_instance_public_methods(instance): + """Retrieves an objects public methods + + :param instance: The instance of the class to inspect + :rtype: dict + :returns: A dictionary that represents an instance's methods where + the keys are the name of the methods and the + values are the handler to the method. + """ + instance_members = inspect.getmembers(instance) + instance_methods = {} + for name, member in instance_members: + if not name.startswith('_'): + if inspect.ismethod(member): + instance_methods[name] = member + return instance_methods + + +def document_model_driven_signature( + section, name, operation_model, include=None, exclude=None +): + """Documents the signature of a model-driven method + + :param section: The section to write the documentation to. + + :param name: The name of the method + + :param operation_model: The operation model for the method + + :type include: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include: The parameter shapes to include in the documentation. + + :type exclude: List of the names of the parameters to exclude. + :param exclude: The names of the parameters to exclude from + documentation. + """ + params = {} + if operation_model.input_shape: + params = operation_model.input_shape.members + + parameter_names = list(params.keys()) + + if include is not None: + for member in include: + parameter_names.append(member.name) + + if exclude is not None: + for member in exclude: + if member in parameter_names: + parameter_names.remove(member) + + signature_params = '' + if parameter_names: + signature_params = '**kwargs' + section.style.start_sphinx_py_method(name, signature_params) + + +def document_custom_signature( + section, name, method, include=None, exclude=None +): + """Documents the signature of a custom method + + :param section: The section to write the documentation to. + + :param name: The name of the method + + :param method: The handle to the method being documented + + :type include: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include: The parameter shapes to include in the documentation. + + :type exclude: List of the names of the parameters to exclude. 
+ :param exclude: The names of the parameters to exclude from + documentation. + """ + signature = inspect.signature(method) + # "raw" class methods are FunctionType and they include "self" param + # object methods are MethodType and they skip the "self" param + if isinstance(method, types.FunctionType): + self_param = next(iter(signature.parameters)) + self_kind = signature.parameters[self_param].kind + # safety check that we got the right parameter + assert self_kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + new_params = signature.parameters.copy() + del new_params[self_param] + signature = signature.replace(parameters=new_params.values()) + signature_params = str(signature).lstrip('(') + signature_params = signature_params.rstrip(')') + section.style.start_sphinx_py_method(name, signature_params) + + +def document_custom_method(section, method_name, method): + """Documents a non-data driven method + + :param section: The section to write the documentation to. + + :param method_name: The name of the method + + :param method: The handle to the method being documented + """ + full_method_name = f"{section.context.get('qualifier', '')}{method_name}" + document_custom_signature(section, full_method_name, method) + method_intro_section = section.add_new_section('method-intro') + method_intro_section.writeln('') + doc_string = inspect.getdoc(method) + if doc_string is not None: + method_intro_section.style.write_py_doc_string(doc_string) + + +def document_model_driven_method( + section, + method_name, + operation_model, + event_emitter, + method_description=None, + example_prefix=None, + include_input=None, + include_output=None, + exclude_input=None, + exclude_output=None, + document_output=True, + include_signature=True, +): + """Documents an individual method + + :param section: The section to write to + + :param method_name: The name of the method + + :param operation_model: The model of the operation + + :param event_emitter: The event emitter to use to emit events + + :param example_prefix: The prefix to use in the method example. + + :type include_input: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include_input: The parameter shapes to include in the + input documentation. + + :type include_output: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include_output: The parameter shapes to include in the + output documentation. + + :type exclude_input: List of the names of the parameters to exclude. + :param exclude_input: The names of the parameters to exclude from + input documentation. + + :type exclude_output: List of the names of the parameters to exclude. + :param exclude_output: The names of the parameters to exclude from + output documentation. + + :param document_output: A boolean flag to indicate whether to + document the output. + + :param include_signature: Whether or not to include the signature. + It is useful for generating docstrings. + """ + # Add the signature if specified. + if include_signature: + document_model_driven_signature( + section, + method_name, + operation_model, + include=include_input, + exclude=exclude_input, + ) + + # Add the description for the method.
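+ # (method_description is normally the operation's modeled documentation, + # as passed in by ClientDocumenter._add_model_driven_method above.)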
+ method_intro_section = section.add_new_section('method-intro') + method_intro_section.include_doc_string(method_description) + if operation_model.deprecated: + method_intro_section.style.start_danger() + method_intro_section.writeln( + 'This operation is deprecated and may not function as ' + 'expected. This operation should not be used going forward ' + 'and is only kept for the purpose of backwards compatiblity.' + ) + method_intro_section.style.end_danger() + service_uid = operation_model.service_model.metadata.get('uid') + if service_uid is not None: + method_intro_section.style.new_paragraph() + method_intro_section.write("See also: ") + link = f"{AWS_DOC_BASE}/{service_uid}/{operation_model.name}" + method_intro_section.style.external_link( + title="AWS API Documentation", link=link + ) + method_intro_section.writeln('') + + # Add the example section. + example_section = section.add_new_section('request-example') + example_section.style.new_paragraph() + example_section.style.bold('Request Syntax') + + context = { + 'special_shape_types': { + 'streaming_input_shape': operation_model.get_streaming_input(), + 'streaming_output_shape': operation_model.get_streaming_output(), + 'eventstream_output_shape': operation_model.get_event_stream_output(), + }, + } + + if operation_model.input_shape: + RequestExampleDocumenter( + service_name=operation_model.service_model.service_name, + operation_name=operation_model.name, + event_emitter=event_emitter, + context=context, + ).document_example( + example_section, + operation_model.input_shape, + prefix=example_prefix, + include=include_input, + exclude=exclude_input, + ) + else: + example_section.style.new_paragraph() + example_section.style.start_codeblock() + example_section.write(example_prefix + '()') + + # Add the request parameter documentation. + request_params_section = section.add_new_section('request-params') + if operation_model.input_shape: + RequestParamsDocumenter( + service_name=operation_model.service_model.service_name, + operation_name=operation_model.name, + event_emitter=event_emitter, + context=context, + ).document_params( + request_params_section, + operation_model.input_shape, + include=include_input, + exclude=exclude_input, + ) + + # Add the return value documentation + return_section = section.add_new_section('return') + return_section.style.new_line() + if operation_model.output_shape is not None and document_output: + return_section.write(':rtype: dict') + return_section.style.new_line() + return_section.write(':returns: ') + return_section.style.indent() + return_section.style.new_line() + + # If the operation is an event stream, describe the tagged union + event_stream_output = operation_model.get_event_stream_output() + if event_stream_output: + event_section = return_section.add_new_section('event-stream') + event_section.style.new_paragraph() + event_section.write( + 'The response of this operation contains an ' + ':class:`.EventStream` member. When iterated the ' + ':class:`.EventStream` will yield events based on the ' + 'structure below, where only one of the top level keys ' + 'will be present for any given event.' 
+ ) + event_section.style.new_line() + + # Add an example return value + return_example_section = return_section.add_new_section( + 'response-example' + ) + return_example_section.style.new_line() + return_example_section.style.bold('Response Syntax') + return_example_section.style.new_paragraph() + ResponseExampleDocumenter( + service_name=operation_model.service_model.service_name, + operation_name=operation_model.name, + event_emitter=event_emitter, + context=context, + ).document_example( + return_example_section, + operation_model.output_shape, + include=include_output, + exclude=exclude_output, + ) + + # Add a description for the return value + return_description_section = return_section.add_new_section( + 'description' + ) + return_description_section.style.new_line() + return_description_section.style.bold('Response Structure') + return_description_section.style.new_paragraph() + ResponseParamsDocumenter( + service_name=operation_model.service_model.service_name, + operation_name=operation_model.name, + event_emitter=event_emitter, + context=context, + ).document_params( + return_description_section, + operation_model.output_shape, + include=include_output, + exclude=exclude_output, + ) + else: + return_section.write(':returns: None') diff --git a/py311/lib/python3.11/site-packages/botocore/docs/paginator.py b/py311/lib/python3.11/site-packages/botocore/docs/paginator.py new file mode 100644 index 0000000000000000000000000000000000000000..2c9b30034f8c4feb64a8e01de86f476488edac63 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/paginator.py @@ -0,0 +1,241 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import os + +from botocore import xform_name +from botocore.compat import OrderedDict +from botocore.docs.bcdoc.restdoc import DocumentStructure +from botocore.docs.method import document_model_driven_method +from botocore.docs.utils import DocumentedShape +from botocore.utils import get_service_module_name + + +class PaginatorDocumenter: + def __init__(self, client, service_paginator_model, root_docs_path): + self._client = client + self._client_class_name = self._client.__class__.__name__ + self._service_name = self._client.meta.service_model.service_name + self._service_paginator_model = service_paginator_model + self._root_docs_path = root_docs_path + self._USER_GUIDE_LINK = ( + 'https://boto3.amazonaws.com/' + 'v1/documentation/api/latest/guide/paginators.html' + ) + + def document_paginators(self, section): + """Documents the various paginators for a service + + param section: The section to write to. + """ + section.style.h2('Paginators') + self._add_overview(section) + section.style.new_line() + section.writeln('The available paginators are:') + section.style.toctree() + + paginator_names = sorted( + self._service_paginator_model._paginator_config + ) + + # List the available paginators and then document each paginator. 
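# The pages generated here document runtime usage along these lines
# (a sketch; the service, paginator name, and bucket are illustrative):
#
#     import boto3
#     client = boto3.client('s3')
#     paginator = client.get_paginator('list_objects_v2')
#     for page in paginator.paginate(
#         Bucket='example-bucket',
#         PaginationConfig={'MaxItems': 100, 'PageSize': 25},
#     ):
#         for obj in page.get('Contents', []):
#             print(obj['Key'])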
+ for paginator_name in paginator_names: + section.style.tocitem( + f'{self._service_name}/paginator/{paginator_name}' + ) + # Create a new DocumentStructure for each paginator and add contents. + paginator_doc_structure = DocumentStructure( + paginator_name, target='html' + ) + self._add_paginator(paginator_doc_structure, paginator_name) + # Write paginators in individual/nested files. + # Path: /reference/services//paginator/.rst + paginator_dir_path = os.path.join( + self._root_docs_path, self._service_name, 'paginator' + ) + paginator_doc_structure.write_to_file( + paginator_dir_path, paginator_name + ) + + def _add_paginator(self, section, paginator_name): + breadcrumb_section = section.add_new_section('breadcrumb') + breadcrumb_section.style.ref( + self._client_class_name, f'../../{self._service_name}' + ) + breadcrumb_section.write(f' / Paginator / {paginator_name}') + section.add_title_section(paginator_name) + + # Docment the paginator class + paginator_section = section.add_new_section(paginator_name) + paginator_section.style.start_sphinx_py_class( + class_name=( + f'{self._client_class_name}.Paginator.{paginator_name}' + ) + ) + paginator_section.style.start_codeblock() + paginator_section.style.new_line() + + # Document how to instantiate the paginator. + paginator_section.write( + f"paginator = client.get_paginator('{xform_name(paginator_name)}')" + ) + paginator_section.style.end_codeblock() + paginator_section.style.new_line() + # Get the pagination model for the particular paginator. + paginator_config = self._service_paginator_model.get_paginator( + paginator_name + ) + document_paginate_method( + section=paginator_section, + paginator_name=paginator_name, + event_emitter=self._client.meta.events, + service_model=self._client.meta.service_model, + paginator_config=paginator_config, + ) + + def _add_overview(self, section): + section.style.new_line() + section.write( + 'Paginators are available on a client instance ' + 'via the ``get_paginator`` method. For more detailed instructions ' + 'and examples on the usage of paginators, see the ' + 'paginators ' + ) + section.style.external_link( + title='user guide', + link=self._USER_GUIDE_LINK, + ) + section.write('.') + section.style.new_line() + + +def document_paginate_method( + section, + paginator_name, + event_emitter, + service_model, + paginator_config, + include_signature=True, +): + """Documents the paginate method of a paginator + + :param section: The section to write to + + :param paginator_name: The name of the paginator. It is snake cased. + + :param event_emitter: The event emitter to use to emit events + + :param service_model: The service model + + :param paginator_config: The paginator config associated to a particular + paginator. + + :param include_signature: Whether or not to include the signature. + It is useful for generating docstrings. + """ + # Retrieve the operation model of the underlying operation. + operation_model = service_model.operation_model(paginator_name) + + # Add representations of the request and response parameters + # we want to include in the description of the paginate method. + # These are parameters we expose via the botocore interface. + pagination_config_members = OrderedDict() + + pagination_config_members['MaxItems'] = DocumentedShape( + name='MaxItems', + type_name='integer', + documentation=( + '
<p>The total number of items to return. If the total ' + 'number of items available is more than the value ' + 'specified in max-items then a <code>NextToken</code> ' + 'will be provided in the output that you can use to ' + 'resume pagination.</p>
' + ), + ) + + if paginator_config.get('limit_key', None): + pagination_config_members['PageSize'] = DocumentedShape( + name='PageSize', + type_name='integer', + documentation='
<p>The size of each page.</p>
', + ) + + pagination_config_members['StartingToken'] = DocumentedShape( + name='StartingToken', + type_name='string', + documentation=( + '
<p>A token to specify where to start paginating. ' + 'This is the <code>NextToken</code> from a previous ' + 'response.</p>
' + ), + ) + + botocore_pagination_params = [ + DocumentedShape( + name='PaginationConfig', + type_name='structure', + documentation=( + '
<p>A dictionary that provides parameters to control ' + 'pagination.</p>
' + ), + members=pagination_config_members, + ) + ] + + botocore_pagination_response_params = [ + DocumentedShape( + name='NextToken', + type_name='string', + documentation=('
<p>A token to resume pagination.</p>
'), + ) + ] + + service_pagination_params = [] + + # Add the normal input token of the method to a list + # of input paramters that we wish to hide since we expose our own. + if isinstance(paginator_config['input_token'], list): + service_pagination_params += paginator_config['input_token'] + else: + service_pagination_params.append(paginator_config['input_token']) + + # Hide the limit key in the documentation. + if paginator_config.get('limit_key', None): + service_pagination_params.append(paginator_config['limit_key']) + + # Hide the output tokens in the documentation. + service_pagination_response_params = [] + if isinstance(paginator_config['output_token'], list): + service_pagination_response_params += paginator_config['output_token'] + else: + service_pagination_response_params.append( + paginator_config['output_token'] + ) + + paginate_description = ( + 'Creates an iterator that will paginate through responses ' + f'from :py:meth:`{get_service_module_name(service_model)}.Client.{xform_name(paginator_name)}`.' + ) + + document_model_driven_method( + section, + 'paginate', + operation_model, + event_emitter=event_emitter, + method_description=paginate_description, + example_prefix='response_iterator = paginator.paginate', + include_input=botocore_pagination_params, + include_output=botocore_pagination_response_params, + exclude_input=service_pagination_params, + exclude_output=service_pagination_response_params, + include_signature=include_signature, + ) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/params.py b/py311/lib/python3.11/site-packages/botocore/docs/params.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0f398b09f9552d4053edab2335c7e7c96bb1be --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/params.py @@ -0,0 +1,302 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from botocore.docs.shape import ShapeDocumenter +from botocore.docs.utils import py_type_name + + +class BaseParamsDocumenter(ShapeDocumenter): + def document_params(self, section, shape, include=None, exclude=None): + """Fills out the documentation for a section given a model shape. + + :param section: The section to write the documentation to. + + :param shape: The shape of the operation. + + :type include: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include: The parameter shapes to include in the documentation. + + :type exclude: List of the names of the parameters to exclude. + :param exclude: The names of the parameters to exclude from + documentation. 
+ """ + history = [] + self.traverse_and_document_shape( + section=section, + shape=shape, + history=history, + name=None, + include=include, + exclude=exclude, + ) + + def document_recursive_shape(self, section, shape, **kwargs): + self._add_member_documentation(section, shape, **kwargs) + + def document_shape_default( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + self._add_member_documentation(section, shape, **kwargs) + + def document_shape_type_list( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + self._add_member_documentation(section, shape, **kwargs) + param_shape = shape.member + param_section = section.add_new_section( + param_shape.name, context={'shape': shape.member.name} + ) + self._start_nested_param(param_section) + self.traverse_and_document_shape( + section=param_section, + shape=param_shape, + history=history, + name=None, + ) + section = section.add_new_section('end-list') + self._end_nested_param(section) + + def document_shape_type_map( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + self._add_member_documentation(section, shape, **kwargs) + + key_section = section.add_new_section( + 'key', context={'shape': shape.key.name} + ) + self._start_nested_param(key_section) + self._add_member_documentation(key_section, shape.key) + + param_section = section.add_new_section( + shape.value.name, context={'shape': shape.value.name} + ) + param_section.style.indent() + self._start_nested_param(param_section) + self.traverse_and_document_shape( + section=param_section, + shape=shape.value, + history=history, + name=None, + ) + + end_section = section.add_new_section('end-map') + self._end_nested_param(end_section) + self._end_nested_param(end_section) + + def document_shape_type_structure( + self, + section, + shape, + history, + include=None, + exclude=None, + name=None, + **kwargs, + ): + members = self._add_members_to_shape(shape.members, include) + self._add_member_documentation(section, shape, name=name) + for param in members: + if exclude and param in exclude: + continue + param_shape = members[param] + param_section = section.add_new_section( + param, context={'shape': param_shape.name} + ) + self._start_nested_param(param_section) + self.traverse_and_document_shape( + section=param_section, + shape=param_shape, + history=history, + name=param, + ) + section = section.add_new_section('end-structure') + self._end_nested_param(section) + + def _add_member_documentation(self, section, shape, **kwargs): + pass + + def _add_members_to_shape(self, members, include): + if include: + members = members.copy() + for param in include: + members[param.name] = param + return members + + def _document_non_top_level_param_type(self, type_section, shape): + special_py_type = self._get_special_py_type_name(shape) + py_type = py_type_name(shape.type_name) + + type_format = '(%s) --' + if special_py_type is not None: + # Special type can reference a linked class. + # Italicizing it blows away the link. 
+ type_section.write(type_format % special_py_type) + else: + type_section.style.italics(type_format % py_type) + type_section.write(' ') + + def _start_nested_param(self, section): + section.style.indent() + section.style.new_line() + + def _end_nested_param(self, section): + section.style.dedent() + section.style.new_line() + + +class ResponseParamsDocumenter(BaseParamsDocumenter): + """Generates the description for the response parameters""" + + EVENT_NAME = 'response-params' + + def _add_member_documentation(self, section, shape, name=None, **kwargs): + name_section = section.add_new_section('param-name') + name_section.write('- ') + if name is not None: + name_section.style.bold(f'{name}') + name_section.write(' ') + type_section = section.add_new_section('param-type') + self._document_non_top_level_param_type(type_section, shape) + + documentation_section = section.add_new_section('param-documentation') + if shape.documentation: + documentation_section.style.indent() + if getattr(shape, 'is_tagged_union', False): + tagged_union_docs = section.add_new_section( + 'param-tagged-union-docs' + ) + note = ( + '.. note::' + ' This is a Tagged Union structure. Only one of the ' + ' following top level keys will be set: %s. ' + ' If a client receives an unknown member it will ' + ' set ``SDK_UNKNOWN_MEMBER`` as the top level key, ' + ' which maps to the name or tag of the unknown ' + ' member. The structure of ``SDK_UNKNOWN_MEMBER`` is ' + ' as follows' + ) + tagged_union_members_str = ', '.join( + [f'``{key}``' for key in shape.members.keys()] + ) + unknown_code_example = ( + '\'SDK_UNKNOWN_MEMBER\': {\'name\': \'UnknownMemberName\'}' + ) + tagged_union_docs.write(note % (tagged_union_members_str)) + example = section.add_new_section('param-unknown-example') + example.style.codeblock(unknown_code_example) + documentation_section.include_doc_string(shape.documentation) + section.style.new_paragraph() + + def document_shape_type_event_stream( + self, section, shape, history, **kwargs + ): + self.document_shape_type_structure(section, shape, history, **kwargs) + + +class RequestParamsDocumenter(BaseParamsDocumenter): + """Generates the description for the request parameters""" + + EVENT_NAME = 'request-params' + + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): + if len(history) > 1: + self._add_member_documentation(section, shape, **kwargs) + section.style.indent() + members = self._add_members_to_shape(shape.members, include) + for i, param in enumerate(members): + if exclude and param in exclude: + continue + param_shape = members[param] + param_section = section.add_new_section( + param, context={'shape': param_shape.name} + ) + param_section.style.new_line() + is_required = param in shape.required_members + self.traverse_and_document_shape( + section=param_section, + shape=param_shape, + history=history, + name=param, + is_required=is_required, + ) + section = section.add_new_section('end-structure') + if len(history) > 1: + section.style.dedent() + section.style.new_line() + + def _add_member_documentation( + self, + section, + shape, + name=None, + is_top_level_param=False, + is_required=False, + **kwargs, + ): + py_type = self._get_special_py_type_name(shape) + if py_type is None: + py_type = py_type_name(shape.type_name) + if is_top_level_param: + type_section = section.add_new_section('param-type') + type_section.write(f':type {name}: {py_type}') + end_type_section = type_section.add_new_section('end-param-type') + 
end_type_section.style.new_line() + name_section = section.add_new_section('param-name') + name_section.write(f':param {name}: ') + + else: + name_section = section.add_new_section('param-name') + name_section.write('- ') + if name is not None: + name_section.style.bold(f'{name}') + name_section.write(' ') + type_section = section.add_new_section('param-type') + self._document_non_top_level_param_type(type_section, shape) + + if is_required: + is_required_section = section.add_new_section('is-required') + is_required_section.style.indent() + is_required_section.style.bold('[REQUIRED]') + is_required_section.write(' ') + if shape.documentation: + documentation_section = section.add_new_section( + 'param-documentation' + ) + documentation_section.style.indent() + if getattr(shape, 'is_tagged_union', False): + tagged_union_docs = section.add_new_section( + 'param-tagged-union-docs' + ) + note = ( + '.. note::' + ' This is a Tagged Union structure. Only one of the ' + ' following top level keys can be set: %s. ' + ) + tagged_union_members_str = ', '.join( + [f'``{key}``' for key in shape.members.keys()] + ) + tagged_union_docs.write(note % (tagged_union_members_str)) + documentation_section.include_doc_string(shape.documentation) + self._add_special_trait_documentation(documentation_section, shape) + end_param_section = section.add_new_section('end-param') + end_param_section.style.new_paragraph() + + def _add_special_trait_documentation(self, section, shape): + if 'idempotencyToken' in shape.metadata: + self._append_idempotency_documentation(section) + + def _append_idempotency_documentation(self, section): + docstring = 'This field is autopopulated if not provided.' + section.write(docstring) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/service.py b/py311/lib/python3.11/site-packages/botocore/docs/service.py new file mode 100644 index 0000000000000000000000000000000000000000..d20a889dc955a71eb3b40e1a4b7eabc549941367 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/service.py @@ -0,0 +1,133 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
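# For a required top-level request parameter, the writer above produces reST
# roughly of this shape (illustrative output, assuming style.bold renders
# '**...**'):
#
#     :type Bucket: string
#     :param Bucket: **[REQUIRED]**
#       The name of the bucket ...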
+from botocore.docs.bcdoc.restdoc import DocumentStructure +from botocore.docs.client import ( + ClientContextParamsDocumenter, + ClientDocumenter, + ClientExceptionsDocumenter, +) +from botocore.docs.paginator import PaginatorDocumenter +from botocore.docs.waiter import WaiterDocumenter +from botocore.exceptions import DataNotFoundError + + +class ServiceDocumenter: + def __init__(self, service_name, session, root_docs_path): + self._session = session + self._service_name = service_name + self._root_docs_path = root_docs_path + + self._client = self._session.create_client( + service_name, + region_name='us-east-1', + aws_access_key_id='foo', + aws_secret_access_key='bar', + ) + self._event_emitter = self._client.meta.events + + self.sections = [ + 'title', + 'client-api', + 'client-exceptions', + 'paginator-api', + 'waiter-api', + 'client-context-params', + ] + + def document_service(self): + """Documents an entire service. + + :returns: The reStructured text of the documented service. + """ + doc_structure = DocumentStructure( + self._service_name, section_names=self.sections, target='html' + ) + self.title(doc_structure.get_section('title')) + self.client_api(doc_structure.get_section('client-api')) + self.client_exceptions(doc_structure.get_section('client-exceptions')) + self.paginator_api(doc_structure.get_section('paginator-api')) + self.waiter_api(doc_structure.get_section('waiter-api')) + context_params_section = doc_structure.get_section( + 'client-context-params' + ) + self.client_context_params(context_params_section) + return doc_structure.flush_structure() + + def title(self, section): + section.style.h1(self._client.__class__.__name__) + self._event_emitter.emit( + f"docs.title.{self._service_name}", section=section + ) + + def table_of_contents(self, section): + section.style.table_of_contents(title='Table of Contents', depth=2) + + def client_api(self, section): + examples = None + try: + examples = self.get_examples(self._service_name) + except DataNotFoundError: + pass + + ClientDocumenter( + self._client, self._root_docs_path, examples + ).document_client(section) + + def client_exceptions(self, section): + ClientExceptionsDocumenter( + self._client, self._root_docs_path + ).document_exceptions(section) + + def paginator_api(self, section): + try: + service_paginator_model = self._session.get_paginator_model( + self._service_name + ) + except DataNotFoundError: + return + if service_paginator_model._paginator_config: + paginator_documenter = PaginatorDocumenter( + self._client, service_paginator_model, self._root_docs_path + ) + paginator_documenter.document_paginators(section) + + def waiter_api(self, section): + if self._client.waiter_names: + service_waiter_model = self._session.get_waiter_model( + self._service_name + ) + waiter_documenter = WaiterDocumenter( + self._client, service_waiter_model, self._root_docs_path + ) + waiter_documenter.document_waiters(section) + + def get_examples(self, service_name, api_version=None): + loader = self._session.get_component('data_loader') + examples = loader.load_service_model( + service_name, 'examples-1', api_version + ) + return examples['examples'] + + def client_context_params(self, section): + omitted_params = ClientContextParamsDocumenter.OMITTED_CONTEXT_PARAMS + params_to_omit = omitted_params.get(self._service_name, []) + service_model = self._client.meta.service_model + raw_context_params = service_model.client_context_parameters + context_params = [ + p for p in raw_context_params if p.name not in params_to_omit + ] 
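# Sketch of driving ServiceDocumenter end to end (the service name and
# output path are illustrative; document_service returns the rendered
# document):
#
#     import botocore.session
#     session = botocore.session.get_session()
#     documenter = ServiceDocumenter(
#         's3', session, root_docs_path='docs/source/reference/services'
#     )
#     rendered = documenter.document_service()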
+ if context_params: + context_param_documenter = ClientContextParamsDocumenter( + self._service_name, context_params + ) + context_param_documenter.document_context_params(section) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/shape.py b/py311/lib/python3.11/site-packages/botocore/docs/shape.py new file mode 100644 index 0000000000000000000000000000000000000000..640a5d18ef390b9c4456d24233d863974144c947 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/shape.py @@ -0,0 +1,135 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + + +# NOTE: This class should not be instantiated and its +# ``traverse_and_document_shape`` method called directly. It should be +# inherited from a Documenter class with the appropriate methods +# and attributes. +from botocore.utils import is_json_value_header + + +class ShapeDocumenter: + EVENT_NAME = '' + + def __init__( + self, service_name, operation_name, event_emitter, context=None + ): + self._service_name = service_name + self._operation_name = operation_name + self._event_emitter = event_emitter + self._context = context + if context is None: + self._context = {'special_shape_types': {}} + + def traverse_and_document_shape( + self, + section, + shape, + history, + include=None, + exclude=None, + name=None, + is_required=False, + ): + """Traverses and documents a shape + + Will take a self class and call its appropriate methods as a shape + is traversed. + + :param section: The section to document. + + :param history: A list of the names of the shapes that have been + traversed. + + :type include: Dictionary where keys are parameter names and + values are the shapes of the parameter names. + :param include: The parameter shapes to include in the documentation. + + :type exclude: List of the names of the parameters to exclude. + :param exclude: The names of the parameters to exclude from + documentation. + + :param name: The name of the shape. + + :param is_required: If the shape is a required member. 
+ """ + param_type = shape.type_name + if getattr(shape, 'serialization', {}).get('eventstream'): + param_type = 'event_stream' + if shape.name in history: + self.document_recursive_shape(section, shape, name=name) + else: + history.append(shape.name) + is_top_level_param = len(history) == 2 + if hasattr(shape, 'is_document_type') and shape.is_document_type: + param_type = 'document' + getattr( + self, + f"document_shape_type_{param_type}", + self.document_shape_default, + )( + section, + shape, + history=history, + name=name, + include=include, + exclude=exclude, + is_top_level_param=is_top_level_param, + is_required=is_required, + ) + if is_top_level_param: + self._event_emitter.emit( + f"docs.{self.EVENT_NAME}.{self._service_name}.{self._operation_name}.{name}", + section=section, + ) + at_overlying_method_section = len(history) == 1 + if at_overlying_method_section: + self._event_emitter.emit( + f"docs.{self.EVENT_NAME}.{self._service_name}.{self._operation_name}.complete-section", + section=section, + ) + history.pop() + + def _get_special_py_default(self, shape): + special_defaults = { + 'document_type': '{...}|[...]|123|123.4|\'string\'|True|None', + 'jsonvalue_header': '{...}|[...]|123|123.4|\'string\'|True|None', + 'streaming_input_shape': 'b\'bytes\'|file', + 'streaming_output_shape': 'StreamingBody()', + 'eventstream_output_shape': 'EventStream()', + } + return self._get_value_for_special_type(shape, special_defaults) + + def _get_special_py_type_name(self, shape): + special_type_names = { + 'document_type': ':ref:`document`', + 'jsonvalue_header': 'JSON serializable', + 'streaming_input_shape': 'bytes or seekable file-like object', + 'streaming_output_shape': ':class:`.StreamingBody`', + 'eventstream_output_shape': ':class:`.EventStream`', + } + return self._get_value_for_special_type(shape, special_type_names) + + def _get_value_for_special_type(self, shape, special_type_map): + if is_json_value_header(shape): + return special_type_map['jsonvalue_header'] + if hasattr(shape, 'is_document_type') and shape.is_document_type: + return special_type_map['document_type'] + for special_type, marked_shape in self._context[ + 'special_shape_types' + ].items(): + if special_type in special_type_map: + if shape == marked_shape: + return special_type_map[special_type] + return None diff --git a/py311/lib/python3.11/site-packages/botocore/docs/sharedexample.py b/py311/lib/python3.11/site-packages/botocore/docs/sharedexample.py new file mode 100644 index 0000000000000000000000000000000000000000..29d3df5fc969daf26bba380b571897195387ad78 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/sharedexample.py @@ -0,0 +1,227 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
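# The history list used by traverse_and_document_shape above is a simple
# recursion guard for self-referential shapes; the pattern in isolation
# (a generic sketch, not botocore's actual classes):
def _walk(shape, history, emit):
    if shape['name'] in history:
        emit(f"{shape['name']} (recursive)")   # stop instead of looping forever
        return
    history.append(shape['name'])
    emit(shape['name'])
    for child in shape.get('members', []):
        _walk(child, history, emit)
    history.pop()                              # allow the shape on sibling branches

_tree = {'name': 'Node', 'members': [{'name': 'Node'}]}  # self-referential
_walk(_tree, [], print)                        # prints: Node / Node (recursive)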
+import numbers +import re + +from botocore.docs.utils import escape_controls +from botocore.utils import parse_timestamp + + +class SharedExampleDocumenter: + def document_shared_example( + self, example, prefix, section, operation_model + ): + """Documents a single shared example based on its definition. + + :param example: The model of the example + + :param prefix: The prefix to use in the method example. + + :param section: The section to write to. + + :param operation_model: The model of the operation used in the example + """ + section.style.new_paragraph() + section.write(example.get('description')) + section.style.new_line() + self.document_input( + section, example, prefix, operation_model.input_shape + ) + self.document_output(section, example, operation_model.output_shape) + + def document_input(self, section, example, prefix, shape): + input_section = section.add_new_section('input') + input_section.style.start_codeblock() + if prefix is not None: + input_section.write(prefix) + params = example.get('input', {}) + comments = example.get('comments') + if comments: + comments = comments.get('input') + param_section = input_section.add_new_section('parameters') + self._document_params(param_section, params, comments, [], shape) + closing_section = input_section.add_new_section('input-close') + closing_section.style.new_line() + closing_section.style.new_line() + closing_section.write('print(response)') + closing_section.style.end_codeblock() + + def document_output(self, section, example, shape): + output_section = section.add_new_section('output') + output_section.style.new_line() + output_section.write('Expected Output:') + output_section.style.new_line() + output_section.style.start_codeblock() + params = example.get('output', {}) + + # There might not be an output, but we will return metadata anyway + params['ResponseMetadata'] = {"...": "..."} + comments = example.get('comments') + if comments: + comments = comments.get('output') + self._document_dict(output_section, params, comments, [], shape, True) + closing_section = output_section.add_new_section('output-close') + closing_section.style.end_codeblock() + + def _document(self, section, value, comments, path, shape): + """ + :param section: The section to add the docs to. + + :param value: The input / output values representing the parameters that + are included in the example. + + :param comments: The dictionary containing all the comments to be + applied to the example. + + :param path: A list describing where the documenter is in traversing the + parameters. This is used to find the equivalent location + in the comments dictionary. 
+ """ + if isinstance(value, dict): + self._document_dict(section, value, comments, path, shape) + elif isinstance(value, list): + self._document_list(section, value, comments, path, shape) + elif isinstance(value, numbers.Number): + self._document_number(section, value, path) + elif shape and shape.type_name == 'timestamp': + self._document_datetime(section, value, path) + else: + self._document_str(section, value, path) + + def _document_dict( + self, section, value, comments, path, shape, top_level=False + ): + dict_section = section.add_new_section('dict-value') + self._start_nested_value(dict_section, '{') + for key, val in value.items(): + path.append(f'.{key}') + item_section = dict_section.add_new_section(key) + item_section.style.new_line() + item_comment = self._get_comment(path, comments) + if item_comment: + item_section.write(item_comment) + item_section.style.new_line() + item_section.write(f"'{key}': ") + + # Shape could be none if there is no output besides ResponseMetadata + item_shape = None + if shape: + if shape.type_name == 'structure': + item_shape = shape.members.get(key) + elif shape.type_name == 'map': + item_shape = shape.value + self._document(item_section, val, comments, path, item_shape) + path.pop() + dict_section_end = dict_section.add_new_section('ending-brace') + self._end_nested_value(dict_section_end, '}') + if not top_level: + dict_section_end.write(',') + + def _document_params(self, section, value, comments, path, shape): + param_section = section.add_new_section('param-values') + self._start_nested_value(param_section, '(') + for key, val in value.items(): + path.append(f'.{key}') + item_section = param_section.add_new_section(key) + item_section.style.new_line() + item_comment = self._get_comment(path, comments) + if item_comment: + item_section.write(item_comment) + item_section.style.new_line() + item_section.write(key + '=') + + # Shape could be none if there are no input parameters + item_shape = None + if shape: + item_shape = shape.members.get(key) + self._document(item_section, val, comments, path, item_shape) + path.pop() + param_section_end = param_section.add_new_section('ending-parenthesis') + self._end_nested_value(param_section_end, ')') + + def _document_list(self, section, value, comments, path, shape): + list_section = section.add_new_section('list-section') + self._start_nested_value(list_section, '[') + item_shape = shape.member + for index, val in enumerate(value): + item_section = list_section.add_new_section(index) + item_section.style.new_line() + path.append(f'[{index}]') + item_comment = self._get_comment(path, comments) + if item_comment: + item_section.write(item_comment) + item_section.style.new_line() + self._document(item_section, val, comments, path, item_shape) + path.pop() + list_section_end = list_section.add_new_section('ending-bracket') + self._end_nested_value(list_section_end, '],') + + def _document_str(self, section, value, path): + # We do the string conversion because this might accept a type that + # we don't specifically address. 
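# The path bookkeeping above yields comment keys such as 'Tags[0].Key':
# dict members append '.<key>', list items append '[<index>]', and
# _get_comment (just below) strips the leading dot before the lookup.
import re

_path = ['.Tags', '[0]', '.Key']
assert re.sub(r'^\.', '', ''.join(_path)) == 'Tags[0].Key'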
+ safe_value = escape_controls(value) + section.write(f"'{safe_value}',") + + def _document_number(self, section, value, path): + section.write(f"{str(value)},") + + def _document_datetime(self, section, value, path): + datetime_tuple = parse_timestamp(value).timetuple() + datetime_str = str(datetime_tuple[0]) + for i in range(1, len(datetime_tuple)): + datetime_str += ", " + str(datetime_tuple[i]) + section.write(f"datetime({datetime_str}),") + + def _get_comment(self, path, comments): + key = re.sub(r'^\.', '', ''.join(path)) + if comments and key in comments: + return '# ' + comments[key] + else: + return '' + + def _start_nested_value(self, section, start): + section.write(start) + section.style.indent() + section.style.indent() + + def _end_nested_value(self, section, end): + section.style.dedent() + section.style.dedent() + section.style.new_line() + section.write(end) + + +def document_shared_examples( + section, operation_model, example_prefix, shared_examples +): + """Documents the shared examples + + :param section: The section to write to. + + :param operation_model: The model of the operation. + + :param example_prefix: The prefix to use in the method example. + + :param shared_examples: The shared JSON examples from the model. + """ + container_section = section.add_new_section('shared-examples') + container_section.style.new_paragraph() + container_section.style.bold('Examples') + documenter = SharedExampleDocumenter() + for example in shared_examples: + documenter.document_shared_example( + example=example, + section=container_section.add_new_section(example['id']), + prefix=example_prefix, + operation_model=operation_model, + ) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/translator.py b/py311/lib/python3.11/site-packages/botocore/docs/translator.py new file mode 100644 index 0000000000000000000000000000000000000000..0b0a308930c6d3d525ceb5cc63abbe43b5c7559e --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/translator.py @@ -0,0 +1,62 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from docutils import nodes +from sphinx.locale import admonitionlabels +from sphinx.writers.html5 import HTML5Translator as SphinxHTML5Translator + + +class BotoHTML5Translator(SphinxHTML5Translator): + """Extension of Sphinx's ``HTML5Translator`` for Botocore documentation.""" + + IGNORE_IMPLICIT_HEADINGS = [ + '[REQUIRED]', + ] + + def visit_admonition(self, node, name=""): + """Uses the h3 tag for admonition titles instead of the p tag.""" + self.body.append( + self.starttag(node, "div", CLASS=("admonition " + name)) + ) + if name: + title = ( + f"
<h3 class="admonition-title">{admonitionlabels[name]}</h3>
" + ) + self.body.append(title) + + def is_implicit_heading(self, node): + """Determines if a node is an implicit heading. + + An implicit heading is represented by a paragraph node whose only + child is a strong node with text that isnt in `IGNORE_IMPLICIT_HEADINGS`. + """ + return ( + len(node) == 1 + and isinstance(node[0], nodes.strong) + and len(node[0]) == 1 + and isinstance(node[0][0], nodes.Text) + and node[0][0].astext() not in self.IGNORE_IMPLICIT_HEADINGS + ) + + def visit_paragraph(self, node): + """Visit a paragraph HTML element. + + Replaces implicit headings with an h3 tag and defers to default + behavior for normal paragraph elements. + """ + if self.is_implicit_heading(node): + text = node[0][0] + self.body.append(f'
<h3>{text}</h3>
\n') + # Do not visit the current nodes children or call its depart method. + raise nodes.SkipNode + else: + super().visit_paragraph(node) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/utils.py b/py311/lib/python3.11/site-packages/botocore/docs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..161e260229155bbc38b4503475240bc4295fdf4c --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/utils.py @@ -0,0 +1,225 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import re +from collections import namedtuple + + +def py_type_name(type_name): + """Get the Python type name for a given model type. + + >>> py_type_name('list') + 'list' + >>> py_type_name('structure') + 'dict' + + :rtype: string + """ + return { + 'blob': 'bytes', + 'character': 'string', + 'double': 'float', + 'long': 'integer', + 'map': 'dict', + 'structure': 'dict', + 'timestamp': 'datetime', + }.get(type_name, type_name) + + +def py_default(type_name): + """Get the Python default value for a given model type. + + >>> py_default('string') + '\'string\'' + >>> py_default('list') + '[...]' + >>> py_default('unknown') + '...' + + :rtype: string + """ + return { + 'double': '123.0', + 'long': '123', + 'integer': '123', + 'string': "'string'", + 'blob': "b'bytes'", + 'boolean': 'True|False', + 'list': '[...]', + 'map': '{...}', + 'structure': '{...}', + 'timestamp': 'datetime(2015, 1, 1)', + }.get(type_name, '...') + + +def get_official_service_name(service_model): + """Generate the official name of an AWS Service + + :param service_model: The service model representing the service + """ + official_name = service_model.metadata.get('serviceFullName') + short_name = service_model.metadata.get('serviceAbbreviation', '') + if short_name.startswith('Amazon'): + short_name = short_name[7:] + if short_name.startswith('AWS'): + short_name = short_name[4:] + if short_name and short_name.lower() not in official_name.lower(): + official_name += f' ({short_name})' + return official_name + + +_DocumentedShape = namedtuple( + 'DocumentedShape', + [ + 'name', + 'type_name', + 'documentation', + 'metadata', + 'members', + 'required_members', + ], +) + + +class DocumentedShape(_DocumentedShape): + """Use this class to inject new shapes into a model for documentation""" + + def __new__( + cls, + name, + type_name, + documentation, + metadata=None, + members=None, + required_members=None, + ): + if metadata is None: + metadata = [] + if members is None: + members = [] + if required_members is None: + required_members = [] + return super().__new__( + cls, + name, + type_name, + documentation, + metadata, + members, + required_members, + ) + + +class AutoPopulatedParam: + def __init__(self, name, param_description=None): + self.name = name + self.param_description = param_description + if param_description is None: + self.param_description = ( + 'Please note that this parameter is automatically populated ' + 'if it is not provided. 
Including this parameter is not ' + 'required\n' + ) + + def document_auto_populated_param(self, event_name, section, **kwargs): + """Documents auto populated parameters + + It will remove any required marks for the parameter, remove the + parameter from the example, and add a snippet about the parameter + being autopopulated in the description. + """ + if event_name.startswith('docs.request-params'): + if self.name in section.available_sections: + section = section.get_section(self.name) + if 'is-required' in section.available_sections: + section.delete_section('is-required') + description_section = section.get_section( + 'param-documentation' + ) + description_section.writeln(self.param_description) + elif event_name.startswith('docs.request-example'): + section = section.get_section('structure-value') + if self.name in section.available_sections: + section.delete_section(self.name) + + +class HideParamFromOperations: + """Hides a single parameter from multiple operations. + + This method will remove a parameter from documentation and from + examples. This method is typically used for things that are + automatically populated because a user would be unable to provide + a value (e.g., a checksum of a serialized XML request body).""" + + def __init__(self, service_name, parameter_name, operation_names): + """ + :type service_name: str + :param service_name: Name of the service to modify. + + :type parameter_name: str + :param parameter_name: Name of the parameter to modify. + + :type operation_names: list + :param operation_names: Operation names to modify. + """ + self._parameter_name = parameter_name + self._params_events = set() + self._example_events = set() + # Build up the sets of relevant event names. + param_template = 'docs.request-params.%s.%s.complete-section' + example_template = 'docs.request-example.%s.%s.complete-section' + for name in operation_names: + self._params_events.add(param_template % (service_name, name)) + self._example_events.add(example_template % (service_name, name)) + + def hide_param(self, event_name, section, **kwargs): + if event_name in self._example_events: + # Modify the structure value for example events. 
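# These hooks are wired up through the event emitter; a sketch of typical
# registrations (the service, operation, and parameter names are
# illustrative, and `session` is assumed to be a botocore session):
#
#     hidden = HideParamFromOperations('s3', 'ContentMD5', ['PutObject'])
#     session.register('docs.*.s3.*.complete-section', hidden.hide_param)
#
#     autopop = AutoPopulatedParam('ClientToken')
#     session.register(
#         'docs.*.ec2.*.complete-section',
#         autopop.document_auto_populated_param,
#     )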
+ section = section.get_section('structure-value') + elif event_name not in self._params_events: + return + if self._parameter_name in section.available_sections: + section.delete_section(self._parameter_name) + + +class AppendParamDocumentation: + """Appends documentation to a specific parameter""" + + def __init__(self, parameter_name, doc_string): + self._parameter_name = parameter_name + self._doc_string = doc_string + + def append_documentation(self, event_name, section, **kwargs): + if self._parameter_name in section.available_sections: + section = section.get_section(self._parameter_name) + description_section = section.get_section('param-documentation') + description_section.writeln(self._doc_string) + + +_CONTROLS = { + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', + '\b': '\\b', + '\f': '\\f', +} +# Combines all CONTROLS keys into a big or regular expression +_ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS))) + + +# Based on the match get the appropriate replacement from CONTROLS +def _CONTROLS_MATCH_HANDLER(match): + return _CONTROLS[match.group(0)] + + +def escape_controls(value): + return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value) diff --git a/py311/lib/python3.11/site-packages/botocore/docs/waiter.py b/py311/lib/python3.11/site-packages/botocore/docs/waiter.py new file mode 100644 index 0000000000000000000000000000000000000000..2918602d2f471be53be47357bd2cf4326fa53dab --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/docs/waiter.py @@ -0,0 +1,180 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import os + +from botocore import xform_name +from botocore.compat import OrderedDict +from botocore.docs.bcdoc.restdoc import DocumentStructure +from botocore.docs.method import document_model_driven_method +from botocore.docs.utils import DocumentedShape +from botocore.utils import get_service_module_name + + +class WaiterDocumenter: + def __init__(self, client, service_waiter_model, root_docs_path): + self._client = client + self._client_class_name = self._client.__class__.__name__ + self._service_name = self._client.meta.service_model.service_name + self._service_waiter_model = service_waiter_model + self._root_docs_path = root_docs_path + self._USER_GUIDE_LINK = ( + 'https://boto3.amazonaws.com/' + 'v1/documentation/api/latest/guide/clients.html#waiters' + ) + + def document_waiters(self, section): + """Documents the various waiters for a service. + + :param section: The section to write to. + """ + section.style.h2('Waiters') + self._add_overview(section) + section.style.new_line() + section.writeln('The available waiters are:') + section.style.toctree() + for waiter_name in self._service_waiter_model.waiter_names: + section.style.tocitem(f'{self._service_name}/waiter/{waiter_name}') + # Create a new DocumentStructure for each waiter and add contents. 
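# escape_controls (defined above in utils.py) rewrites control characters
# into their escaped source form, which keeps generated examples on one line:
from botocore.docs.utils import escape_controls

assert escape_controls('line1\nline2\ttabbed') == 'line1\\nline2\\ttabbed'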
+ waiter_doc_structure = DocumentStructure( + waiter_name, target='html' + ) + self._add_single_waiter(waiter_doc_structure, waiter_name) + # Write waiters in individual/nested files. + # Path: /reference/services//waiter/.rst + waiter_dir_path = os.path.join( + self._root_docs_path, self._service_name, 'waiter' + ) + waiter_doc_structure.write_to_file(waiter_dir_path, waiter_name) + + def _add_single_waiter(self, section, waiter_name): + breadcrumb_section = section.add_new_section('breadcrumb') + breadcrumb_section.style.ref( + self._client_class_name, f'../../{self._service_name}' + ) + breadcrumb_section.write(f' / Waiter / {waiter_name}') + section.add_title_section(waiter_name) + waiter_section = section.add_new_section(waiter_name) + waiter_section.style.start_sphinx_py_class( + class_name=f"{self._client_class_name}.Waiter.{waiter_name}" + ) + + # Add example on how to instantiate waiter. + waiter_section.style.start_codeblock() + waiter_section.style.new_line() + waiter_section.write( + f'waiter = client.get_waiter(\'{xform_name(waiter_name)}\')' + ) + waiter_section.style.end_codeblock() + + # Add information on the wait() method + waiter_section.style.new_line() + document_wait_method( + section=waiter_section, + waiter_name=waiter_name, + event_emitter=self._client.meta.events, + service_model=self._client.meta.service_model, + service_waiter_model=self._service_waiter_model, + ) + + def _add_overview(self, section): + section.style.new_line() + section.write( + 'Waiters are available on a client instance ' + 'via the ``get_waiter`` method. For more detailed instructions ' + 'and examples on the usage or waiters, see the ' + 'waiters ' + ) + section.style.external_link( + title='user guide', + link=self._USER_GUIDE_LINK, + ) + section.write('.') + section.style.new_line() + + +def document_wait_method( + section, + waiter_name, + event_emitter, + service_model, + service_waiter_model, + include_signature=True, +): + """Documents a the wait method of a waiter + + :param section: The section to write to + + :param waiter_name: The name of the waiter + + :param event_emitter: The event emitter to use to emit events + + :param service_model: The service model + + :param service_waiter_model: The waiter model associated to the service + + :param include_signature: Whether or not to include the signature. + It is useful for generating docstrings. + """ + waiter_model = service_waiter_model.get_waiter(waiter_name) + operation_model = service_model.operation_model(waiter_model.operation) + + waiter_config_members = OrderedDict() + + waiter_config_members['Delay'] = DocumentedShape( + name='Delay', + type_name='integer', + documentation=( + '
<p>The amount of time in seconds to wait between ' + f'attempts. Default: {waiter_model.delay}</p>
' + ), + ) + + waiter_config_members['MaxAttempts'] = DocumentedShape( + name='MaxAttempts', + type_name='integer', + documentation=( + '
<p>The maximum number of attempts to be made. ' + f'Default: {waiter_model.max_attempts}</p>
' + ), + ) + + botocore_waiter_params = [ + DocumentedShape( + name='WaiterConfig', + type_name='structure', + documentation=( + '
<p>A dictionary that provides parameters to control ' + 'waiting behavior.</p>
' + ), + members=waiter_config_members, + ) + ] + + wait_description = ( + f'Polls :py:meth:`{get_service_module_name(service_model)}.Client.' + f'{xform_name(waiter_model.operation)}` every {waiter_model.delay} ' + 'seconds until a successful state is reached. An error is ' + f'raised after {waiter_model.max_attempts} failed checks.' + ) + + document_model_driven_method( + section, + 'wait', + operation_model, + event_emitter=event_emitter, + method_description=wait_description, + example_prefix='waiter.wait', + include_input=botocore_waiter_params, + document_output=False, + include_signature=include_signature, + ) diff --git a/py311/lib/python3.11/site-packages/botocore/endpoint.py b/py311/lib/python3.11/site-packages/botocore/endpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..f393de78f19674a61b612a8ba4b02c7512c744cb --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/endpoint.py @@ -0,0 +1,449 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import datetime +import logging +import os +import threading +import time +import uuid + +from botocore import parsers +from botocore.awsrequest import create_request_object +from botocore.compat import get_current_datetime +from botocore.exceptions import HTTPClientError +from botocore.history import get_global_history_recorder +from botocore.hooks import first_non_none_response +from botocore.httpchecksum import handle_checksum_body +from botocore.httpsession import URLLib3Session +from botocore.response import StreamingBody +from botocore.utils import ( + get_environ_proxies, + is_valid_endpoint_url, + is_valid_ipv6_endpoint_url, +) + +logger = logging.getLogger(__name__) +history_recorder = get_global_history_recorder() +DEFAULT_TIMEOUT = 60 +MAX_POOL_CONNECTIONS = 10 + + +def convert_to_response_dict(http_response, operation_model): + """Convert an HTTP response object to a request dict. + + This converts the HTTP response object to a dictionary. + + :type http_response: botocore.awsrequest.AWSResponse + :param http_response: The HTTP response from an AWS service request. 
+ + :rtype: dict + :return: A response dictionary which will contain the following keys: + * headers (dict) + * status_code (int) + * body (string or file-like object) + + """ + response_dict = { + 'headers': http_response.headers, + 'status_code': http_response.status_code, + 'context': { + 'operation_name': operation_model.name, + }, + } + if response_dict['status_code'] >= 300: + response_dict['body'] = http_response.content + elif operation_model.has_event_stream_output: + response_dict['body'] = http_response.raw + elif operation_model.has_streaming_output: + length = response_dict['headers'].get('content-length') + response_dict['body'] = StreamingBody(http_response.raw, length) + else: + response_dict['body'] = http_response.content + return response_dict + + +class Endpoint: + """ + Represents an endpoint for a particular service in a specific + region. Only an endpoint can make requests. + + :ivar service: The Service object that describes this endpoints + service. + :ivar host: The fully qualified endpoint hostname. + :ivar session: The session object. + """ + + def __init__( + self, + host, + endpoint_prefix, + event_emitter, + response_parser_factory=None, + http_session=None, + ): + self._endpoint_prefix = endpoint_prefix + self._event_emitter = event_emitter + self.host = host + self._lock = threading.Lock() + if response_parser_factory is None: + response_parser_factory = parsers.ResponseParserFactory() + self._response_parser_factory = response_parser_factory + self.http_session = http_session + if self.http_session is None: + self.http_session = URLLib3Session() + + def __repr__(self): + return f'{self._endpoint_prefix}({self.host})' + + def close(self): + self.http_session.close() + + def make_request(self, operation_model, request_dict): + logger.debug( + "Making request for %s with params: %s", + operation_model, + request_dict, + ) + return self._send_request(request_dict, operation_model) + + def create_request(self, params, operation_model=None): + request = create_request_object(params) + if operation_model: + request.stream_output = any( + [ + operation_model.has_streaming_output, + operation_model.has_event_stream_output, + ] + ) + service_id = operation_model.service_model.service_id.hyphenize() + event_name = f'request-created.{service_id}.{operation_model.name}' + self._event_emitter.emit( + event_name, + request=request, + operation_name=operation_model.name, + ) + prepared_request = self.prepare_request(request) + return prepared_request + + def _encode_headers(self, headers): + # In place encoding of headers to utf-8 if they are unicode. 
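        # Values that are already bytes pass through unchanged; only str
        # values are encoded by the loop below.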
+ for key, value in headers.items(): + if isinstance(value, str): + headers[key] = value.encode('utf-8') + + def prepare_request(self, request): + self._encode_headers(request.headers) + return request.prepare() + + def _calculate_ttl( + self, response_received_timestamp, date_header, read_timeout + ): + local_timestamp = get_current_datetime() + date_conversion = datetime.datetime.strptime( + date_header, "%a, %d %b %Y %H:%M:%S %Z" + ) + estimated_skew = date_conversion - response_received_timestamp + ttl = ( + local_timestamp + + datetime.timedelta(seconds=read_timeout) + + estimated_skew + ) + return ttl.strftime('%Y%m%dT%H%M%SZ') + + def _set_ttl(self, retries_context, read_timeout, success_response): + response_date_header = success_response[0].headers.get('Date') + has_streaming_input = retries_context.get('has_streaming_input') + if response_date_header and not has_streaming_input: + try: + response_received_timestamp = get_current_datetime() + retries_context['ttl'] = self._calculate_ttl( + response_received_timestamp, + response_date_header, + read_timeout, + ) + except Exception: + logger.debug( + "Exception received when updating retries context with TTL", + exc_info=True, + ) + + def _update_retries_context(self, context, attempt, success_response=None): + retries_context = context.setdefault('retries', {}) + retries_context['attempt'] = attempt + if 'invocation-id' not in retries_context: + retries_context['invocation-id'] = str(uuid.uuid4()) + + if success_response: + read_timeout = context['client_config'].read_timeout + self._set_ttl(retries_context, read_timeout, success_response) + + def _send_request(self, request_dict, operation_model): + attempts = 1 + context = request_dict['context'] + self._update_retries_context(context, attempts) + request = self.create_request(request_dict, operation_model) + success_response, exception = self._get_response( + request, operation_model, context + ) + while self._needs_retry( + attempts, + operation_model, + request_dict, + success_response, + exception, + ): + attempts += 1 + self._update_retries_context(context, attempts, success_response) + # If there is a stream associated with the request, we need + # to reset it before attempting to send the request again. + # This will ensure that we resend the entire contents of the + # body. + request.reset_stream() + # Create a new request when retried (including a new signature). + request = self.create_request(request_dict, operation_model) + success_response, exception = self._get_response( + request, operation_model, context + ) + if ( + success_response is not None + and 'ResponseMetadata' in success_response[1] + ): + # We want to share num retries, not num attempts. + total_retries = attempts - 1 + success_response[1]['ResponseMetadata']['RetryAttempts'] = ( + total_retries + ) + if exception is not None: + raise exception + else: + return success_response + + def _get_response(self, request, operation_model, context): + # This will return a tuple of (success_response, exception) + # and success_response is itself a tuple of + # (http_response, parsed_dict). + # If an exception occurs then the success_response is None. + # If no exception occurs then exception is None. 
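        # Concretely: ((http_response, parsed_dict), None) on success,
        # (None, exception) on failure.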
+ success_response, exception = self._do_get_response( + request, operation_model, context + ) + kwargs_to_emit = { + 'response_dict': None, + 'parsed_response': None, + 'context': context, + 'exception': exception, + } + if success_response is not None: + http_response, parsed_response = success_response + kwargs_to_emit['parsed_response'] = parsed_response + kwargs_to_emit['response_dict'] = convert_to_response_dict( + http_response, operation_model + ) + service_id = operation_model.service_model.service_id.hyphenize() + self._event_emitter.emit( + f"response-received.{service_id}.{operation_model.name}", + **kwargs_to_emit, + ) + return success_response, exception + + def _do_get_response(self, request, operation_model, context): + try: + logger.debug("Sending http request: %s", request) + history_recorder.record( + 'HTTP_REQUEST', + { + 'method': request.method, + 'headers': request.headers, + 'streaming': operation_model.has_streaming_input, + 'url': request.url, + 'body': request.body, + }, + ) + service_id = operation_model.service_model.service_id.hyphenize() + event_name = f"before-send.{service_id}.{operation_model.name}" + responses = self._event_emitter.emit(event_name, request=request) + http_response = first_non_none_response(responses) + if http_response is None: + http_response = self._send(request) + except HTTPClientError as e: + return (None, e) + except Exception as e: + logger.debug( + "Exception received when sending HTTP request.", exc_info=True + ) + return (None, e) + # This returns the http_response and the parsed_data. + response_dict = convert_to_response_dict( + http_response, operation_model + ) + handle_checksum_body( + http_response, + response_dict, + context, + operation_model, + ) + + http_response_record_dict = response_dict.copy() + http_response_record_dict['streaming'] = ( + operation_model.has_streaming_output + ) + history_recorder.record('HTTP_RESPONSE', http_response_record_dict) + + protocol = operation_model.service_model.resolved_protocol + customized_response_dict = {} + self._event_emitter.emit( + f"before-parse.{service_id}.{operation_model.name}", + operation_model=operation_model, + response_dict=response_dict, + customized_response_dict=customized_response_dict, + ) + parser = self._response_parser_factory.create_parser(protocol) + parsed_response = parser.parse( + response_dict, operation_model.output_shape + ) + parsed_response.update(customized_response_dict) + # Do a second parsing pass to pick up on any modeled error fields + # NOTE: Ideally, we would push this down into the parser classes but + # they currently have no reference to the operation or service model + # The parsers should probably take the operation model instead of + # output shape but we can't change that now + if http_response.status_code >= 300: + self._add_modeled_error_fields( + response_dict, + parsed_response, + operation_model, + parser, + ) + history_recorder.record('PARSED_RESPONSE', parsed_response) + return (http_response, parsed_response), None + + def _add_modeled_error_fields( + self, + response_dict, + parsed_response, + operation_model, + parser, + ): + error_code = parsed_response.get("Error", {}).get("Code") + if error_code is None: + return + service_model = operation_model.service_model + error_shape = service_model.shape_for_error_code(error_code) + if error_shape is None: + return + modeled_parse = parser.parse(response_dict, error_shape) + # TODO: avoid naming conflicts with ResponseMetadata and Error + parsed_response.update(modeled_parse) + + 
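    # A sketch of the retry contract implemented below (the handler and
    # event name are illustrative, not defined in this module): a
    # "needs-retry" handler returns None to stop retrying, or the number
    # of seconds to sleep before the next attempt.
    #
    #     def retry_up_to_three_times(attempts, **kwargs):
    #         return 0.5 if attempts < 3 else None
    #
    #     event_emitter.register('needs-retry.s3.GetObject',
    #                            retry_up_to_three_times)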
def _needs_retry( + self, + attempts, + operation_model, + request_dict, + response=None, + caught_exception=None, + ): + service_id = operation_model.service_model.service_id.hyphenize() + event_name = f"needs-retry.{service_id}.{operation_model.name}" + responses = self._event_emitter.emit( + event_name, + response=response, + endpoint=self, + operation=operation_model, + attempts=attempts, + caught_exception=caught_exception, + request_dict=request_dict, + ) + handler_response = first_non_none_response(responses) + if handler_response is None: + return False + else: + # Request needs to be retried, and we need to sleep + # for the specified number of times. + logger.debug( + "Response received to retry, sleeping for %s seconds", + handler_response, + ) + time.sleep(handler_response) + return True + + def _send(self, request): + return self.http_session.send(request) + + +class EndpointCreator: + def __init__(self, event_emitter): + self._event_emitter = event_emitter + + def create_endpoint( + self, + service_model, + region_name, + endpoint_url, + verify=None, + response_parser_factory=None, + timeout=DEFAULT_TIMEOUT, + max_pool_connections=MAX_POOL_CONNECTIONS, + http_session_cls=URLLib3Session, + proxies=None, + socket_options=None, + client_cert=None, + proxies_config=None, + ): + if not is_valid_endpoint_url( + endpoint_url + ) and not is_valid_ipv6_endpoint_url(endpoint_url): + raise ValueError(f"Invalid endpoint: {endpoint_url}") + + if proxies is None: + proxies = self._get_proxies(endpoint_url) + endpoint_prefix = service_model.endpoint_prefix + + logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout) + http_session = http_session_cls( + timeout=timeout, + proxies=proxies, + verify=self._get_verify_value(verify), + max_pool_connections=max_pool_connections, + socket_options=socket_options, + client_cert=client_cert, + proxies_config=proxies_config, + ) + + return Endpoint( + endpoint_url, + endpoint_prefix=endpoint_prefix, + event_emitter=self._event_emitter, + response_parser_factory=response_parser_factory, + http_session=http_session, + ) + + def _get_proxies(self, url): + # We could also support getting proxies from a config file, + # but for now proxy support is taken from the environment. + return get_environ_proxies(url) + + def _get_verify_value(self, verify): + # This is to account for: + # https://github.com/kennethreitz/requests/issues/1436 + # where we need to honor REQUESTS_CA_BUNDLE because we're creating our + # own request objects. + # First, if verify is not None, then the user explicitly specified + # a value so this automatically wins. + if verify is not None: + return verify + # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to + # True if the env var does not exist. + return os.environ.get('REQUESTS_CA_BUNDLE', True) diff --git a/py311/lib/python3.11/site-packages/botocore/endpoint_provider.py b/py311/lib/python3.11/site-packages/botocore/endpoint_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..d76f9ac2453b2e4af09cacdaa68c0c8f4f00055f --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/endpoint_provider.py @@ -0,0 +1,723 @@ +# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+"""
+NOTE: All classes and functions in this module are considered private and are
+subject to abrupt breaking changes. Please do not use them directly.
+
+To view the raw JSON that the objects in this module represent, please
+go to any `endpoint-rule-set.json` file in /botocore/data/<service>/<api version>/
+or you can look at the test files in /tests/unit/data/endpoints/valid-rules/
+"""
+
+import logging
+import re
+from enum import Enum
+from string import Formatter
+from typing import NamedTuple
+
+from botocore import xform_name
+from botocore.compat import IPV4_RE, quote, urlparse
+from botocore.exceptions import EndpointResolutionError
+from botocore.utils import (
+    ArnParser,
+    InvalidArnException,
+    is_valid_ipv4_endpoint_url,
+    is_valid_ipv6_endpoint_url,
+    lru_cache_weakref,
+    normalize_url_path,
+    percent_encode,
+)
+
+logger = logging.getLogger(__name__)
+
+TEMPLATE_STRING_RE = re.compile(r"\{[a-zA-Z#]+\}")
+GET_ATTR_RE = re.compile(r"(\w*)\[(\d+)\]")
+VALID_HOST_LABEL_RE = re.compile(
+    r"^(?!-)[a-zA-Z\d-]{1,63}(?<!-)$",
+)
+CACHE_SIZE = 100
+ARN_PARSER = ArnParser()
+STRING_FORMATTER = Formatter()
+
+
+class RuleSetStandardLibrary:
+    """Rule actions to be performed by the EndpointProvider."""
+
+    def __init__(self, partitions_data):
+        self.partitions_data = partitions_data
+
+    def is_func(self, argument):
+        """Determine if an object is a function object."""
+        return isinstance(argument, dict) and "fn" in argument
+
+    def is_ref(self, argument):
+        """Determine if an object is a reference object."""
+        return isinstance(argument, dict) and "ref" in argument
+
+    def is_template(self, argument):
+        """Determine if an object contains a template string."""
+        return (
+            isinstance(argument, str)
+            and TEMPLATE_STRING_RE.search(argument) is not None
+        )
+
+    def resolve_template_string(self, value, scope_vars):
+        """Resolve and inject values into a template string."""
+        result = ""
+        for literal, reference, _, _ in STRING_FORMATTER.parse(value):
+            if literal:
+                result += literal
+            if reference is not None:
+                template_value = scope_vars
+                for param in reference.split("#"):
+                    template_value = template_value[param]
+                result += template_value
+        return result
+
+    def resolve_value(self, value, scope_vars):
+        """Return evaluated value based on type."""
+        if self.is_func(value):
+            return self.call_function(value, scope_vars)
+        elif self.is_ref(value):
+            return scope_vars.get(value["ref"])
+        elif self.is_template(value):
+            return self.resolve_template_string(value, scope_vars)
+
+        return value
+
+    def convert_func_name(self, value):
+        """Normalize function names."""
+        normalized_name = f"{xform_name(value)}"
+        if normalized_name == "not":
+            normalized_name = f"_{normalized_name}"
+        return normalized_name.replace(".", "_")
+
+    def call_function(self, func_signature, scope_vars):
+        """Call the function with the resolved arguments and assign to
+        `scope_vars` when applicable.
+        """
+        func_args = [
+            self.resolve_value(arg, scope_vars)
+            for arg in func_signature["argv"]
+        ]
+        func_name = self.convert_func_name(func_signature["fn"])
+        func = getattr(self, func_name)
+        result = func(*func_args)
+        if "assign" in func_signature:
+            assign = func_signature["assign"]
+            if assign in scope_vars:
+                raise EndpointResolutionError(
+                    msg=f"Assignment {assign} already exists in "
+                    "scoped variables and cannot be overwritten"
+                )
+            scope_vars[assign] = result
+        return result
+
+    def is_set(self, value):
+        """Evaluates whether a value is set."""
+        return value is not None
+
+    def get_attr(self, value, path):
+        """Find an attribute within a value given a path string.
+
+        :type value: dict or list
+        :type path: str
+        :rtype: Any
+        """
+        for part in path.split("."):
+            match = GET_ATTR_RE.search(part)
+            if match is not None:
+                name, index = match.groups()
+                index = int(index)
+                if name:
+                    value = value[name]
+                if index >= len(value):
+                    return None
+                return value[index]
+            else:
+                value = value[part]
+        return value
+
+    def format_partition_output(self, partition):
+        output = partition["outputs"]
+        output["name"] = partition["id"]
+        return output
+
+    def is_partition_match(self, region, partition):
+        matches_regex = re.match(partition["regionRegex"], region) is not None
+        return region in partition["regions"] or matches_regex
+
+    def aws_partition(self, value):
+        """Match a region string to an AWS partition.
+
+        :type value: str
+        :rtype: dict
+        """
+        partitions = self.partitions_data['partitions']
+
+        if value is not None:
+            for partition in partitions:
+                if self.is_partition_match(value, partition):
+                    return self.format_partition_output(partition)
+
+        # return the default partition if no matches were found
+        aws_partition = partitions[0]
+        return self.format_partition_output(aws_partition)
+
+    def aws_parse_arn(self, value):
+        """Parse and validate string for ARN components.
+
+        :type value: str
+        :rtype: dict
+        """
+        if value is None or not value.startswith("arn:"):
+            return None
+
+        try:
+            arn_dict = ARN_PARSER.parse_arn(value)
+        except InvalidArnException:
+            return None
+
+        # partition, resource, and service are required
+        if not all(
+            (arn_dict["partition"], arn_dict["service"], arn_dict["resource"])
+        ):
+            return None
+
+        arn_dict["accountId"] = arn_dict.pop("account")
+
+        resource = arn_dict.pop("resource")
+        arn_dict["resourceId"] = resource.replace(":", "/").split("/")
+
+        return arn_dict
+
+    def is_valid_host_label(self, value, allow_subdomains):
+        """Evaluates whether a value is a valid host label per
+        RFC 1123. If allow_subdomains is True, split on `.` and validate
+        each component separately.
+
+        :type value: str
+        :type allow_subdomains: bool
+        :rtype: bool
+        """
+        if value is None or allow_subdomains is False and value.count(".") > 0:
+            return False
+
+        if allow_subdomains is True:
+            return all(
+                self.is_valid_host_label(label, False)
+                for label in value.split(".")
+            )
+
+        return VALID_HOST_LABEL_RE.match(value) is not None
+
+    def string_equals(self, value1, value2):
+        """Evaluates two string values for equality.
+ + :type value1: str + :type value2: str + :rtype: bool + """ + if not all(isinstance(val, str) for val in (value1, value2)): + msg = f"Both values must be strings, not {type(value1)} and {type(value2)}." + raise EndpointResolutionError(msg=msg) + return value1 == value2 + + def uri_encode(self, value): + """Perform percent-encoding on an input string. + + :type value: str + :rytpe: str + """ + if value is None: + return None + + return percent_encode(value) + + def parse_url(self, value): + """Parse a URL string into components. + + :type value: str + :rtype: dict + """ + if value is None: + return None + + url_components = urlparse(value) + try: + # url_parse may assign non-integer values to + # `port` and will fail when accessed. + url_components.port + except ValueError: + return None + + scheme = url_components.scheme + query = url_components.query + # URLs with queries are not supported + if scheme not in ("https", "http") or len(query) > 0: + return None + + path = url_components.path + normalized_path = quote(normalize_url_path(path)) + if not normalized_path.endswith("/"): + normalized_path = f"{normalized_path}/" + + return { + "scheme": scheme, + "authority": url_components.netloc, + "path": path, + "normalizedPath": normalized_path, + "isIp": is_valid_ipv4_endpoint_url(value) + or is_valid_ipv6_endpoint_url(value), + } + + def boolean_equals(self, value1, value2): + """Evaluates two boolean values for equality. + + :type value1: bool + :type value2: bool + :rtype: bool + """ + if not all(isinstance(val, bool) for val in (value1, value2)): + msg = f"Both arguments must be bools, not {type(value1)} and {type(value2)}." + raise EndpointResolutionError(msg=msg) + return value1 is value2 + + def is_ascii(self, value): + """Evaluates if a string only contains ASCII characters. + + :type value: str + :rtype: bool + """ + try: + value.encode("ascii") + return True + except UnicodeEncodeError: + return False + + def substring(self, value, start, stop, reverse): + """Computes a substring given the start index and end index. If `reverse` is + True, slice the string from the end instead. + + :type value: str + :type start: int + :type end: int + :type reverse: bool + :rtype: str + """ + if not isinstance(value, str): + msg = f"Input must be a string, not {type(value)}." + raise EndpointResolutionError(msg=msg) + if start >= stop or len(value) < stop or not self.is_ascii(value): + return None + + if reverse is True: + r_start = len(value) - stop + r_stop = len(value) - start + return value[r_start:r_stop] + + return value[start:stop] + + def _not(self, value): + """A function implementation of the logical operator `not`. + + :type value: Any + :rtype: bool + """ + return not value + + def aws_is_virtual_hostable_s3_bucket(self, value, allow_subdomains): + """Evaluates whether a value is a valid bucket name for virtual host + style bucket URLs. To pass, the value must meet the following criteria: + 1. is_valid_host_label(value) is True + 2. length between 3 and 63 characters (inclusive) + 3. does not contain uppercase characters + 4. is not formatted as an IP address + + If allow_subdomains is True, split on `.` and validate + each component separately. 
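
        For example, "my-bucket" qualifies, while "MyBucket" (contains
        uppercase) and "192.168.5.4" (an IP address) do not.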
+ + :type value: str + :type allow_subdomains: bool + :rtype: bool + """ + if ( + value is None + or len(value) < 3 + or value.lower() != value + or IPV4_RE.match(value) is not None + ): + return False + + return self.is_valid_host_label( + value, allow_subdomains=allow_subdomains + ) + + +# maintains backwards compatibility as `Library` was misspelled +# in earlier versions +RuleSetStandardLibary = RuleSetStandardLibrary + + +class BaseRule: + """Base interface for individual endpoint rules.""" + + def __init__(self, conditions, documentation=None): + self.conditions = conditions + self.documentation = documentation + + def evaluate(self, scope_vars, rule_lib): + raise NotImplementedError() + + def evaluate_conditions(self, scope_vars, rule_lib): + """Determine if all conditions in a rule are met. + + :type scope_vars: dict + :type rule_lib: RuleSetStandardLibrary + :rtype: bool + """ + for func_signature in self.conditions: + result = rule_lib.call_function(func_signature, scope_vars) + if result is False or result is None: + return False + return True + + +class RuleSetEndpoint(NamedTuple): + """A resolved endpoint object returned by a rule.""" + + url: str + properties: dict + headers: dict + + +class EndpointRule(BaseRule): + def __init__(self, endpoint, **kwargs): + super().__init__(**kwargs) + self.endpoint = endpoint + + def evaluate(self, scope_vars, rule_lib): + """Determine if conditions are met to provide a valid endpoint. + + :type scope_vars: dict + :rtype: RuleSetEndpoint + """ + if self.evaluate_conditions(scope_vars, rule_lib): + url = rule_lib.resolve_value(self.endpoint["url"], scope_vars) + properties = self.resolve_properties( + self.endpoint.get("properties", {}), + scope_vars, + rule_lib, + ) + headers = self.resolve_headers(scope_vars, rule_lib) + return RuleSetEndpoint( + url=url, properties=properties, headers=headers + ) + + return None + + def resolve_properties(self, properties, scope_vars, rule_lib): + """Traverse `properties` attribute, resolving any template strings. + + :type properties: dict/list/str + :type scope_vars: dict + :type rule_lib: RuleSetStandardLibrary + :rtype: dict + """ + if isinstance(properties, list): + return [ + self.resolve_properties(prop, scope_vars, rule_lib) + for prop in properties + ] + elif isinstance(properties, dict): + return { + key: self.resolve_properties(value, scope_vars, rule_lib) + for key, value in properties.items() + } + elif rule_lib.is_template(properties): + return rule_lib.resolve_template_string(properties, scope_vars) + + return properties + + def resolve_headers(self, scope_vars, rule_lib): + """Iterate through headers attribute resolving all values. + + :type scope_vars: dict + :type rule_lib: RuleSetStandardLibrary + :rtype: dict + """ + resolved_headers = {} + headers = self.endpoint.get("headers", {}) + + for header, values in headers.items(): + resolved_headers[header] = [ + rule_lib.resolve_value(item, scope_vars) for item in values + ] + return resolved_headers + + +class ErrorRule(BaseRule): + def __init__(self, error, **kwargs): + super().__init__(**kwargs) + self.error = error + + def evaluate(self, scope_vars, rule_lib): + """If an error rule's conditions are met, raise an error rule. 
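
        Note that on a match the resolved message is raised as an
        EndpointResolutionError rather than returned.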
+ + :type scope_vars: dict + :type rule_lib: RuleSetStandardLibrary + :rtype: EndpointResolutionError + """ + if self.evaluate_conditions(scope_vars, rule_lib): + error = rule_lib.resolve_value(self.error, scope_vars) + raise EndpointResolutionError(msg=error) + return None + + +class TreeRule(BaseRule): + """A tree rule is non-terminal meaning it will never be returned to a provider. + Additionally this means it has no attributes that need to be resolved. + """ + + def __init__(self, rules, **kwargs): + super().__init__(**kwargs) + self.rules = [RuleCreator.create(**rule) for rule in rules] + + def evaluate(self, scope_vars, rule_lib): + """If a tree rule's conditions are met, iterate its sub-rules + and return first result found. + + :type scope_vars: dict + :type rule_lib: RuleSetStandardLibrary + :rtype: RuleSetEndpoint/EndpointResolutionError + """ + if self.evaluate_conditions(scope_vars, rule_lib): + for rule in self.rules: + # don't share scope_vars between rules + rule_result = rule.evaluate(scope_vars.copy(), rule_lib) + if rule_result: + return rule_result + return None + + +class RuleCreator: + endpoint = EndpointRule + error = ErrorRule + tree = TreeRule + + @classmethod + def create(cls, **kwargs): + """Create a rule instance from metadata. + + :rtype: TreeRule/EndpointRule/ErrorRule + """ + rule_type = kwargs.pop("type") + try: + rule_class = getattr(cls, rule_type) + except AttributeError: + raise EndpointResolutionError( + msg=f"Unknown rule type: {rule_type}. A rule must " + "be of type tree, endpoint or error." + ) + else: + return rule_class(**kwargs) + + +class ParameterType(Enum): + """Translation from `type` attribute to native Python type.""" + + string = str + boolean = bool + stringarray = tuple + + +class ParameterDefinition: + """The spec of an individual parameter defined in a RuleSet.""" + + def __init__( + self, + name, + parameter_type, + documentation=None, + builtIn=None, + default=None, + required=None, + deprecated=None, + ): + self.name = name + try: + self.parameter_type = getattr( + ParameterType, parameter_type.lower() + ).value + except AttributeError: + raise EndpointResolutionError( + msg=f"Unknown parameter type: {parameter_type}. " + "A parameter must be of type string, boolean, or stringarray." + ) + self.documentation = documentation + self.builtin = builtIn + self.default = default + self.required = required + self.deprecated = deprecated + + def validate_input(self, value): + """Perform base validation on parameter input. + + :type value: Any + :raises: EndpointParametersError + """ + + if not isinstance(value, self.parameter_type): + raise EndpointResolutionError( + msg=f"Value ({self.name}) is the wrong " + f"type. Must be {self.parameter_type}." + ) + if self.deprecated is not None: + depr_str = f"{self.name} has been deprecated." + msg = self.deprecated.get("message") + since = self.deprecated.get("since") + if msg: + depr_str += f"\n{msg}" + if since: + depr_str += f"\nDeprecated since {since}." 
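            # Deprecated parameters are only reported via logging; no
            # exception is raised.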
+ logger.info(depr_str) + + return None + + def process_input(self, value): + """Process input against spec, applying default if value is None.""" + if value is None: + if self.default is not None: + return self.default + if self.required: + raise EndpointResolutionError( + msg=f"Cannot find value for required parameter {self.name}" + ) + # in all other cases, the parameter will keep the value None + else: + self.validate_input(value) + return value + + +class RuleSet: + """Collection of rules to derive a routable service endpoint.""" + + def __init__( + self, version, parameters, rules, partitions, documentation=None + ): + self.version = version + self.parameters = self._ingest_parameter_spec(parameters) + self.rules = [RuleCreator.create(**rule) for rule in rules] + self.rule_lib = RuleSetStandardLibrary(partitions) + self.documentation = documentation + + def _ingest_parameter_spec(self, parameters): + return { + name: ParameterDefinition( + name, + spec["type"], + spec.get("documentation"), + spec.get("builtIn"), + spec.get("default"), + spec.get("required"), + spec.get("deprecated"), + ) + for name, spec in parameters.items() + } + + def process_input_parameters(self, input_params): + """Process each input parameter against its spec. + + :type input_params: dict + """ + for name, spec in self.parameters.items(): + value = spec.process_input(input_params.get(name)) + if value is not None: + input_params[name] = value + return None + + def evaluate(self, input_parameters): + """Evaluate input parameters against rules returning first match. + + :type input_parameters: dict + """ + self.process_input_parameters(input_parameters) + for rule in self.rules: + evaluation = rule.evaluate(input_parameters.copy(), self.rule_lib) + if evaluation is not None: + return evaluation + return None + + +class EndpointProvider: + """Derives endpoints from a RuleSet for given input parameters.""" + + def __init__(self, ruleset_data, partition_data): + self.ruleset = RuleSet(**ruleset_data, partitions=partition_data) + + @lru_cache_weakref(maxsize=CACHE_SIZE) + def resolve_endpoint(self, **input_parameters): + """Match input parameters to a rule. + + :type input_parameters: dict + :rtype: RuleSetEndpoint + """ + params_for_error = input_parameters.copy() + endpoint = self.ruleset.evaluate(input_parameters) + if endpoint is None: + param_string = "\n".join( + [f"{key}: {value}" for key, value in params_for_error.items()] + ) + raise EndpointResolutionError( + msg=f"No endpoint found for parameters:\n{param_string}" + ) + return endpoint diff --git a/py311/lib/python3.11/site-packages/botocore/errorfactory.py b/py311/lib/python3.11/site-packages/botocore/errorfactory.py new file mode 100644 index 0000000000000000000000000000000000000000..6084e51da467c0cbf0e7fdeb52f858512bebcc6f --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/errorfactory.py @@ -0,0 +1,90 @@ +# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
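The factory below is what backs a botocore client's ``exceptions`` attribute. A minimal usage sketch (assumes configured credentials and a region; the S3 client and the ``NoSuchKey`` error code are for illustration):

    import botocore.session

    client = botocore.session.get_session().create_client('s3')
    try:
        client.get_object(Bucket='my-bucket', Key='missing-key')
    except client.exceptions.NoSuchKey as e:
        # Same class as client.exceptions.from_code('NoSuchKey')
        print(e.response['Error']['Code'])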
+from botocore.exceptions import ClientError +from botocore.utils import get_service_module_name + + +class BaseClientExceptions: + ClientError = ClientError + + def __init__(self, code_to_exception): + """Base class for exceptions object on a client + + :type code_to_exception: dict + :param code_to_exception: Mapping of error codes (strings) to exception + class that should be raised when encountering a particular + error code. + """ + self._code_to_exception = code_to_exception + + def from_code(self, error_code): + """Retrieves the error class based on the error code + + This is helpful for identifying the exception class needing to be + caught based on the ClientError.parsed_reponse['Error']['Code'] value + + :type error_code: string + :param error_code: The error code associated to a ClientError exception + + :rtype: ClientError or a subclass of ClientError + :returns: The appropriate modeled exception class for that error + code. If the error code does not match any of the known + modeled exceptions then return a generic ClientError. + """ + return self._code_to_exception.get(error_code, self.ClientError) + + def __getattr__(self, name): + exception_cls_names = [ + exception_cls.__name__ + for exception_cls in self._code_to_exception.values() + ] + raise AttributeError( + rf"{self} object has no attribute {name}. " + rf"Valid exceptions are: {', '.join(exception_cls_names)}" + ) + + +class ClientExceptionsFactory: + def __init__(self): + self._client_exceptions_cache = {} + + def create_client_exceptions(self, service_model): + """Creates a ClientExceptions object for the particular service client + + :type service_model: botocore.model.ServiceModel + :param service_model: The service model for the client + + :rtype: object that subclasses from BaseClientExceptions + :returns: The exceptions object of a client that can be used + to grab the various different modeled exceptions. + """ + service_name = service_model.service_name + if service_name not in self._client_exceptions_cache: + client_exceptions = self._create_client_exceptions(service_model) + self._client_exceptions_cache[service_name] = client_exceptions + return self._client_exceptions_cache[service_name] + + def _create_client_exceptions(self, service_model): + cls_props = {} + code_to_exception = {} + for error_shape in service_model.error_shapes: + exception_name = str(error_shape.name) + exception_cls = type(exception_name, (ClientError,), {}) + cls_props[exception_name] = exception_cls + code = str(error_shape.error_code) + code_to_exception[code] = exception_cls + cls_name = str(get_service_module_name(service_model) + 'Exceptions') + client_exceptions_cls = type( + cls_name, (BaseClientExceptions,), cls_props + ) + return client_exceptions_cls(code_to_exception) diff --git a/py311/lib/python3.11/site-packages/botocore/eventstream.py b/py311/lib/python3.11/site-packages/botocore/eventstream.py new file mode 100644 index 0000000000000000000000000000000000000000..865e65b7870d8154acfa94cf48d3d72305269cc2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/eventstream.py @@ -0,0 +1,622 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""Binary Event Stream Decoding""" + +from binascii import crc32 +from struct import unpack + +from botocore.exceptions import EventStreamError + +# byte length of the prelude (total_length + header_length + prelude_crc) +_PRELUDE_LENGTH = 12 +_MAX_HEADERS_LENGTH = 128 * 1024 # 128 Kb +_MAX_PAYLOAD_LENGTH = 24 * 1024 * 1024 # 24 Mb + + +class ParserError(Exception): + """Base binary flow encoding parsing exception.""" + + pass + + +class DuplicateHeader(ParserError): + """Duplicate header found in the event.""" + + def __init__(self, header): + message = f'Duplicate header present: "{header}"' + super().__init__(message) + + +class InvalidHeadersLength(ParserError): + """Headers length is longer than the maximum.""" + + def __init__(self, length): + message = f'Header length of {length} exceeded the maximum of {_MAX_HEADERS_LENGTH}' + super().__init__(message) + + +class InvalidPayloadLength(ParserError): + """Payload length is longer than the maximum.""" + + def __init__(self, length): + message = f'Payload length of {length} exceeded the maximum of {_MAX_PAYLOAD_LENGTH}' + super().__init__(message) + + +class ChecksumMismatch(ParserError): + """Calculated checksum did not match the expected checksum.""" + + def __init__(self, expected, calculated): + message = f'Checksum mismatch: expected 0x{expected:08x}, calculated 0x{calculated:08x}' + super().__init__(message) + + +class NoInitialResponseError(ParserError): + """An event of type initial-response was not received. + + This exception is raised when the event stream produced no events or + the first event in the stream was not of the initial-response type. + """ + + def __init__(self): + message = 'First event was not of the initial-response type' + super().__init__(message) + + +class DecodeUtils: + """Unpacking utility functions used in the decoder. + + All methods on this class take raw bytes and return a tuple containing + the value parsed from the bytes and the number of bytes consumed to parse + that value. + """ + + UINT8_BYTE_FORMAT = '!B' + UINT16_BYTE_FORMAT = '!H' + UINT32_BYTE_FORMAT = '!I' + INT8_BYTE_FORMAT = '!b' + INT16_BYTE_FORMAT = '!h' + INT32_BYTE_FORMAT = '!i' + INT64_BYTE_FORMAT = '!q' + PRELUDE_BYTE_FORMAT = '!III' + + # uint byte size to unpack format + UINT_BYTE_FORMAT = { + 1: UINT8_BYTE_FORMAT, + 2: UINT16_BYTE_FORMAT, + 4: UINT32_BYTE_FORMAT, + } + + @staticmethod + def unpack_true(data): + """This method consumes none of the provided bytes and returns True. + + :type data: bytes + :param data: The bytes to parse from. This is ignored in this method. + + :rtype: tuple + :rtype: (bool, int) + :returns: The tuple (True, 0) + """ + return True, 0 + + @staticmethod + def unpack_false(data): + """This method consumes none of the provided bytes and returns False. + + :type data: bytes + :param data: The bytes to parse from. This is ignored in this method. + + :rtype: tuple + :rtype: (bool, int) + :returns: The tuple (False, 0) + """ + return False, 0 + + @staticmethod + def unpack_uint8(data): + """Parse an unsigned 8-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. 
+ + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0] + return value, 1 + + @staticmethod + def unpack_uint32(data): + """Parse an unsigned 32-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.UINT32_BYTE_FORMAT, data[:4])[0] + return value, 4 + + @staticmethod + def unpack_int8(data): + """Parse a signed 8-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.INT8_BYTE_FORMAT, data[:1])[0] + return value, 1 + + @staticmethod + def unpack_int16(data): + """Parse a signed 16-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: tuple + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.INT16_BYTE_FORMAT, data[:2])[0] + return value, 2 + + @staticmethod + def unpack_int32(data): + """Parse a signed 32-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: tuple + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.INT32_BYTE_FORMAT, data[:4])[0] + return value, 4 + + @staticmethod + def unpack_int64(data): + """Parse a signed 64-bit integer from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: tuple + :rtype: (int, int) + :returns: A tuple containing the (parsed integer value, bytes consumed) + """ + value = unpack(DecodeUtils.INT64_BYTE_FORMAT, data[:8])[0] + return value, 8 + + @staticmethod + def unpack_byte_array(data, length_byte_size=2): + """Parse a variable length byte array from the bytes. + + The bytes are expected to be in the following format: + [ length ][0 ... length bytes] + where length is an unsigned integer represented in the smallest number + of bytes to hold the maximum length of the array. + + :type data: bytes + :param data: The bytes to parse from. + + :type length_byte_size: int + :param length_byte_size: The byte size of the preceding integer that + represents the length of the array. Supported values are 1, 2, and 4. + + :rtype: (bytes, int) + :returns: A tuple containing the (parsed byte array, bytes consumed). + """ + uint_byte_format = DecodeUtils.UINT_BYTE_FORMAT[length_byte_size] + length = unpack(uint_byte_format, data[:length_byte_size])[0] + bytes_end = length + length_byte_size + array_bytes = data[length_byte_size:bytes_end] + return array_bytes, bytes_end + + @staticmethod + def unpack_utf8_string(data, length_byte_size=2): + """Parse a variable length utf-8 string from the bytes. + + The bytes are expected to be in the following format: + [ length ][0 ... length bytes] + where length is an unsigned integer represented in the smallest number + of bytes to hold the maximum length of the array and the following + bytes are a valid utf-8 string. + + :type data: bytes + :param bytes: The bytes to parse from. + + :type length_byte_size: int + :param length_byte_size: The byte size of the preceding integer that + represents the length of the array. Supported values are 1, 2, and 4. 
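
        For example, with length_byte_size=2 the bytes b'\x00\x05hello'
        decode to ('hello', 7): two length bytes, then five string bytes.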
+ + :rtype: (str, int) + :returns: A tuple containing the (utf-8 string, bytes consumed). + """ + array_bytes, consumed = DecodeUtils.unpack_byte_array( + data, length_byte_size + ) + return array_bytes.decode('utf-8'), consumed + + @staticmethod + def unpack_uuid(data): + """Parse a 16-byte uuid from the bytes. + + :type data: bytes + :param data: The bytes to parse from. + + :rtype: (bytes, int) + :returns: A tuple containing the (uuid bytes, bytes consumed). + """ + return data[:16], 16 + + @staticmethod + def unpack_prelude(data): + """Parse the prelude for an event stream message from the bytes. + + The prelude for an event stream message has the following format: + [total_length][header_length][prelude_crc] + where each field is an unsigned 32-bit integer. + + :rtype: ((int, int, int), int) + :returns: A tuple of ((total_length, headers_length, prelude_crc), + consumed) + """ + return (unpack(DecodeUtils.PRELUDE_BYTE_FORMAT, data), _PRELUDE_LENGTH) + + +def _validate_checksum(data, checksum, crc=0): + # To generate the same numeric value across all Python versions and + # platforms use crc32(data) & 0xffffffff. + computed_checksum = crc32(data, crc) & 0xFFFFFFFF + if checksum != computed_checksum: + raise ChecksumMismatch(checksum, computed_checksum) + + +class MessagePrelude: + """Represents the prelude of an event stream message.""" + + def __init__(self, total_length, headers_length, crc): + self.total_length = total_length + self.headers_length = headers_length + self.crc = crc + + @property + def payload_length(self): + """Calculates the total payload length. + + The extra minus 4 bytes is for the message CRC. + + :rtype: int + :returns: The total payload length. + """ + return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4 + + @property + def payload_end(self): + """Calculates the byte offset for the end of the message payload. + + The extra minus 4 bytes is for the message CRC. + + :rtype: int + :returns: The byte offset from the beginning of the event stream + message to the end of the payload. + """ + return self.total_length - 4 + + @property + def headers_end(self): + """Calculates the byte offset for the end of the message headers. + + :rtype: int + :returns: The byte offset from the beginning of the event stream + message to the end of the headers. + """ + return _PRELUDE_LENGTH + self.headers_length + + +class EventStreamMessage: + """Represents an event stream message.""" + + def __init__(self, prelude, headers, payload, crc): + self.prelude = prelude + self.headers = headers + self.payload = payload + self.crc = crc + + def to_response_dict(self, status_code=200): + message_type = self.headers.get(':message-type') + if message_type == 'error' or message_type == 'exception': + status_code = 400 + return { + 'status_code': status_code, + 'headers': self.headers, + 'body': self.payload, + } + + +class EventStreamHeaderParser: + """Parses the event headers from an event stream message. + + Expects all of the header data upfront and creates a dictionary of headers + to return. This object can be reused multiple times to parse the headers + from multiple event stream messages. 
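
    For example, the three bytes b'\x01a\x00' decode to the single header
    {'a': True}: a one-byte name length, the name "a", and header type 0
    (boolean_true), which consumes no value bytes.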
+ """ + + # Maps header type to appropriate unpacking function + # These unpacking functions return the value and the amount unpacked + _HEADER_TYPE_MAP = { + # boolean_true + 0: DecodeUtils.unpack_true, + # boolean_false + 1: DecodeUtils.unpack_false, + # byte + 2: DecodeUtils.unpack_int8, + # short + 3: DecodeUtils.unpack_int16, + # integer + 4: DecodeUtils.unpack_int32, + # long + 5: DecodeUtils.unpack_int64, + # byte_array + 6: DecodeUtils.unpack_byte_array, + # string + 7: DecodeUtils.unpack_utf8_string, + # timestamp + 8: DecodeUtils.unpack_int64, + # uuid + 9: DecodeUtils.unpack_uuid, + } + + def __init__(self): + self._data = None + + def parse(self, data): + """Parses the event stream headers from an event stream message. + + :type data: bytes + :param data: The bytes that correspond to the headers section of an + event stream message. + + :rtype: dict + :returns: A dictionary of header key, value pairs. + """ + self._data = data + return self._parse_headers() + + def _parse_headers(self): + headers = {} + while self._data: + name, value = self._parse_header() + if name in headers: + raise DuplicateHeader(name) + headers[name] = value + return headers + + def _parse_header(self): + name = self._parse_name() + value = self._parse_value() + return name, value + + def _parse_name(self): + name, consumed = DecodeUtils.unpack_utf8_string(self._data, 1) + self._advance_data(consumed) + return name + + def _parse_type(self): + type, consumed = DecodeUtils.unpack_uint8(self._data) + self._advance_data(consumed) + return type + + def _parse_value(self): + header_type = self._parse_type() + value_unpacker = self._HEADER_TYPE_MAP[header_type] + value, consumed = value_unpacker(self._data) + self._advance_data(consumed) + return value + + def _advance_data(self, consumed): + self._data = self._data[consumed:] + + +class EventStreamBuffer: + """Streaming based event stream buffer + + A buffer class that wraps bytes from an event stream providing parsed + messages as they become available via an iterable interface. + """ + + def __init__(self): + self._data = b'' + self._prelude = None + self._header_parser = EventStreamHeaderParser() + + def add_data(self, data): + """Add data to the buffer. 
+ + :type data: bytes + :param data: The bytes to add to the buffer to be used when parsing + """ + self._data += data + + def _validate_prelude(self, prelude): + if prelude.headers_length > _MAX_HEADERS_LENGTH: + raise InvalidHeadersLength(prelude.headers_length) + + if prelude.payload_length > _MAX_PAYLOAD_LENGTH: + raise InvalidPayloadLength(prelude.payload_length) + + def _parse_prelude(self): + prelude_bytes = self._data[:_PRELUDE_LENGTH] + raw_prelude, _ = DecodeUtils.unpack_prelude(prelude_bytes) + prelude = MessagePrelude(*raw_prelude) + # The minus 4 removes the prelude crc from the bytes to be checked + _validate_checksum(prelude_bytes[: _PRELUDE_LENGTH - 4], prelude.crc) + self._validate_prelude(prelude) + return prelude + + def _parse_headers(self): + header_bytes = self._data[_PRELUDE_LENGTH : self._prelude.headers_end] + return self._header_parser.parse(header_bytes) + + def _parse_payload(self): + prelude = self._prelude + payload_bytes = self._data[prelude.headers_end : prelude.payload_end] + return payload_bytes + + def _parse_message_crc(self): + prelude = self._prelude + crc_bytes = self._data[prelude.payload_end : prelude.total_length] + message_crc, _ = DecodeUtils.unpack_uint32(crc_bytes) + return message_crc + + def _parse_message_bytes(self): + # The minus 4 includes the prelude crc to the bytes to be checked + message_bytes = self._data[ + _PRELUDE_LENGTH - 4 : self._prelude.payload_end + ] + return message_bytes + + def _validate_message_crc(self): + message_crc = self._parse_message_crc() + message_bytes = self._parse_message_bytes() + _validate_checksum(message_bytes, message_crc, crc=self._prelude.crc) + return message_crc + + def _parse_message(self): + crc = self._validate_message_crc() + headers = self._parse_headers() + payload = self._parse_payload() + message = EventStreamMessage(self._prelude, headers, payload, crc) + self._prepare_for_next_message() + return message + + def _prepare_for_next_message(self): + # Advance the data and reset the current prelude + self._data = self._data[self._prelude.total_length :] + self._prelude = None + + def next(self): + """Provides the next available message parsed from the stream + + :rtype: EventStreamMessage + :returns: The next event stream message + """ + if len(self._data) < _PRELUDE_LENGTH: + raise StopIteration() + + if self._prelude is None: + self._prelude = self._parse_prelude() + + if len(self._data) < self._prelude.total_length: + raise StopIteration() + + return self._parse_message() + + def __next__(self): + return self.next() + + def __iter__(self): + return self + + +class EventStream: + """Wrapper class for an event stream body. + + This wraps the underlying streaming body, parsing it for individual events + and yielding them as they come available through the iterator interface. + + The following example uses the S3 select API to get structured data out of + an object stored in S3 using an event stream. 
+ + **Example:** + :: + from botocore.session import Session + + s3 = Session().create_client('s3') + response = s3.select_object_content( + Bucket='bucketname', + Key='keyname', + ExpressionType='SQL', + RequestProgress={'Enabled': True}, + Expression="SELECT * FROM S3Object s", + InputSerialization={'CSV': {}}, + OutputSerialization={'CSV': {}}, + ) + # This is the event stream in the response + event_stream = response['Payload'] + end_event_received = False + with open('output', 'wb') as f: + # Iterate over events in the event stream as they come + for event in event_stream: + # If we received a records event, write the data to a file + if 'Records' in event: + data = event['Records']['Payload'] + f.write(data) + # If we received a progress event, print the details + elif 'Progress' in event: + print(event['Progress']['Details']) + # End event indicates that the request finished successfully + elif 'End' in event: + print('Result is complete') + end_event_received = True + if not end_event_received: + raise Exception("End event not received, request incomplete.") + """ + + def __init__(self, raw_stream, output_shape, parser, operation_name): + self._raw_stream = raw_stream + self._output_shape = output_shape + self._operation_name = operation_name + self._parser = parser + self._event_generator = self._create_raw_event_generator() + + def __iter__(self): + for event in self._event_generator: + parsed_event = self._parse_event(event) + if parsed_event: + yield parsed_event + + def _create_raw_event_generator(self): + event_stream_buffer = EventStreamBuffer() + for chunk in self._raw_stream.stream(): + event_stream_buffer.add_data(chunk) + yield from event_stream_buffer + + def _parse_event(self, event): + response_dict = event.to_response_dict() + parsed_response = self._parser.parse(response_dict, self._output_shape) + if response_dict['status_code'] == 200: + return parsed_response + else: + raise EventStreamError(parsed_response, self._operation_name) + + def get_initial_response(self): + try: + initial_event = next(self._event_generator) + event_type = initial_event.headers.get(':event-type') + if event_type == 'initial-response': + return initial_event + except StopIteration: + pass + raise NoInitialResponseError() + + def close(self): + """Closes the underlying streaming body.""" + self._raw_stream.close() diff --git a/py311/lib/python3.11/site-packages/botocore/exceptions.py b/py311/lib/python3.11/site-packages/botocore/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..294bbf4020cf94067b8637c0607c93ed707dcffc --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/exceptions.py @@ -0,0 +1,857 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
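The exception classes below share one construction pattern: each subclass overrides ``fmt`` and is instantiated with keyword arguments that fill that format string, while ``__reduce__`` keeps instances picklable. A brief sketch in the same style (``MyResourceError`` is illustrative, not part of botocore):

    class MyResourceError(BotoCoreError):
        fmt = 'Operation failed for resource: {resource_name}'

    raise MyResourceError(resource_name='my-table')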
+ +from botocore.vendored import requests +from botocore.vendored.requests.packages import urllib3 + + +def _exception_from_packed_args(exception_cls, args=None, kwargs=None): + # This is helpful for reducing Exceptions that only accept kwargs as + # only positional arguments can be provided for __reduce__ + # Ideally, this would also be a class method on the BotoCoreError + # but instance methods cannot be pickled. + if args is None: + args = () + if kwargs is None: + kwargs = {} + return exception_cls(*args, **kwargs) + + +class BotoCoreError(Exception): + """ + The base exception class for BotoCore exceptions. + + :ivar msg: The descriptive message associated with the error. + """ + + fmt = 'An unspecified error occurred' + + def __init__(self, **kwargs): + msg = self.fmt.format(**kwargs) + Exception.__init__(self, msg) + self.kwargs = kwargs + + def __reduce__(self): + return _exception_from_packed_args, (self.__class__, None, self.kwargs) + + +class DataNotFoundError(BotoCoreError): + """ + The data associated with a particular path could not be loaded. + + :ivar data_path: The data path that the user attempted to load. + """ + + fmt = 'Unable to load data for: {data_path}' + + +class UnknownServiceError(DataNotFoundError): + """Raised when trying to load data for an unknown service. + + :ivar service_name: The name of the unknown service. + + """ + + fmt = ( + "Unknown service: '{service_name}'. Valid service names are: " + "{known_service_names}" + ) + + +class UnknownRegionError(BotoCoreError): + """Raised when trying to load data for an unknown region. + + :ivar region_name: The name of the unknown region. + + """ + + fmt = "Unknown region: '{region_name}'. {error_msg}" + + +class ApiVersionNotFoundError(BotoCoreError): + """ + The data associated with either the API version or a compatible one + could not be loaded. + + :ivar data_path: The data path that the user attempted to load. + :ivar api_version: The API version that the user attempted to load. + """ + + fmt = 'Unable to load data {data_path} for: {api_version}' + + +class HTTPClientError(BotoCoreError): + fmt = 'An HTTP Client raised an unhandled exception: {error}' + + def __init__(self, request=None, response=None, **kwargs): + self.request = request + self.response = response + super().__init__(**kwargs) + + def __reduce__(self): + return _exception_from_packed_args, ( + self.__class__, + (self.request, self.response), + self.kwargs, + ) + + +class ConnectionError(BotoCoreError): + fmt = 'An HTTP Client failed to establish a connection: {error}' + + +class InvalidIMDSEndpointError(BotoCoreError): + fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}' + + +class InvalidIMDSEndpointModeError(BotoCoreError): + fmt = ( + 'Invalid EC2 Instance Metadata endpoint mode: {mode}' + ' Valid endpoint modes (case-insensitive): {valid_modes}.' + ) + + +class EndpointConnectionError(ConnectionError): + fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"' + + +class SSLError(ConnectionError, requests.exceptions.SSLError): + fmt = 'SSL validation failed for {endpoint_url} {error}' + + +class ConnectionClosedError(HTTPClientError): + fmt = ( + 'Connection was closed before we received a valid response ' + 'from endpoint URL: "{endpoint_url}".' 
+ ) + + +class ReadTimeoutError( + HTTPClientError, + requests.exceptions.ReadTimeout, + urllib3.exceptions.ReadTimeoutError, +): + fmt = 'Read timeout on endpoint URL: "{endpoint_url}"' + + +class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout): + fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"' + + +class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError): + fmt = 'Failed to connect to proxy URL: "{proxy_url}"' + + +class ResponseStreamingError(HTTPClientError): + fmt = 'An error occurred while reading from response stream: {error}' + + +class NoCredentialsError(BotoCoreError): + """ + No credentials could be found. + """ + + fmt = 'Unable to locate credentials' + + +class NoAuthTokenError(BotoCoreError): + """ + No authorization token could be found. + """ + + fmt = 'Unable to locate authorization token' + + +class TokenRetrievalError(BotoCoreError): + """ + Error attempting to retrieve a token from a remote source. + + :ivar provider: The name of the token provider. + :ivar error_msg: The msg explaining why the token could not be retrieved. + + """ + + fmt = 'Error when retrieving token from {provider}: {error_msg}' + + +class PartialCredentialsError(BotoCoreError): + """ + Only partial credentials were found. + + :ivar cred_var: The missing credential variable name. + + """ + + fmt = 'Partial credentials found in {provider}, missing: {cred_var}' + + +class CredentialRetrievalError(BotoCoreError): + """ + Error attempting to retrieve credentials from a remote source. + + :ivar provider: The name of the credential provider. + :ivar error_msg: The msg explaining why credentials could not be + retrieved. + + """ + + fmt = 'Error when retrieving credentials from {provider}: {error_msg}' + + +class UnknownSignatureVersionError(BotoCoreError): + """ + Requested Signature Version is not known. + + :ivar signature_version: The name of the requested signature version. + """ + + fmt = 'Unknown Signature Version: {signature_version}.' + + +class ServiceNotInRegionError(BotoCoreError): + """ + The service is not available in requested region. + + :ivar service_name: The name of the service. + :ivar region_name: The name of the region. + """ + + fmt = 'Service {service_name} not available in region {region_name}' + + +class BaseEndpointResolverError(BotoCoreError): + """Base error for endpoint resolving errors. + + Should never be raised directly, but clients can catch + this exception if they want to generically handle any errors + during the endpoint resolution process. + + """ + + +class NoRegionError(BaseEndpointResolverError): + """No region was specified.""" + + fmt = 'You must specify a region.' + + +class EndpointVariantError(BaseEndpointResolverError): + """ + Could not construct modeled endpoint variant. + + :ivar error_msg: The message explaining why the modeled endpoint variant + is unable to be constructed. + + """ + + fmt = ( + 'Unable to construct a modeled endpoint with the following ' + 'variant(s) {tags}: ' + ) + + +class UnknownEndpointError(BaseEndpointResolverError, ValueError): + """ + Could not construct an endpoint. + + :ivar service_name: The name of the service. + :ivar region_name: The name of the region. + """ + + fmt = ( + 'Unable to construct an endpoint for ' + '{service_name} in region {region_name}' + ) + + +class UnknownFIPSEndpointError(BaseEndpointResolverError): + """ + Could not construct a FIPS endpoint. + + :ivar service_name: The name of the service. + :ivar region_name: The name of the region. 
+    """
+
+    fmt = (
+        'The provided FIPS pseudo-region "{region_name}" is not known for '
+        'the service "{service_name}". A FIPS compliant endpoint cannot be '
+        'constructed.'
+    )
+
+
+class ProfileNotFound(BotoCoreError):
+    """
+    The specified configuration profile was not found in the
+    configuration file.
+
+    :ivar profile: The name of the profile the user attempted to load.
+    """
+
+    fmt = 'The config profile ({profile}) could not be found'
+
+
+class ConfigParseError(BotoCoreError):
+    """
+    The configuration file could not be parsed.
+
+    :ivar path: The path to the configuration file.
+    """
+
+    fmt = 'Unable to parse config file: {path}'
+
+
+class ConfigNotFound(BotoCoreError):
+    """
+    The specified configuration file could not be found.
+
+    :ivar path: The path to the configuration file.
+    """
+
+    fmt = 'The specified config file ({path}) could not be found.'
+
+
+class MissingParametersError(BotoCoreError):
+    """
+    One or more required parameters were not supplied.
+
+    :ivar object_name: The object that has missing parameters.
+        This can be an operation or a parameter (in the
+        case of inner params). The str() of this object
+        will be used so it doesn't need to implement anything
+        other than str().
+    :ivar missing: The names of the missing parameters.
+    """
+
+    fmt = (
+        'The following required parameters are missing for '
+        '{object_name}: {missing}'
+    )
+
+
+class ValidationError(BotoCoreError):
+    """
+    An exception occurred validating parameters.
+
+    Subclasses must accept a ``value`` and ``param``
+    argument in their ``__init__``.
+
+    :ivar value: The value that was being validated.
+    :ivar param: The parameter that failed validation.
+    :ivar type_name: The name of the underlying type.
+    """
+
+    fmt = "Invalid value ('{value}') for param {param} of type {type_name}"
+
+
+class ParamValidationError(BotoCoreError):
+    fmt = 'Parameter validation failed:\n{report}'
+
+
+# These exceptions subclass from ValidationError so that code
+# can just 'except ValidationError' to catch any possible validation
+# error.
+class UnknownKeyError(ValidationError):
+    """
+    Unknown key in a struct parameter.
+
+    :ivar value: The value that was being checked.
+    :ivar param: The name of the parameter.
+    :ivar choices: The valid choices the value can be.
+    """
+
+    fmt = (
+        "Unknown key '{value}' for param '{param}'. Must be one of: {choices}"
+    )
+
+
+class RangeError(ValidationError):
+    """
+    A parameter value was out of the valid range.
+
+    :ivar value: The value that was being checked.
+    :ivar param: The parameter that failed validation.
+    :ivar min_value: The specified minimum value.
+    :ivar max_value: The specified maximum value.
+    """
+
+    fmt = (
+        'Value out of range for param {param}: '
+        '{min_value} <= {value} <= {max_value}'
+    )
+
+
+class UnknownParameterError(ValidationError):
+    """
+    Unknown top level parameter.
+
+    :ivar name: The name of the unknown parameter.
+    :ivar operation: The name of the operation.
+    :ivar choices: The valid choices the parameter name can be.
+    """
+
+    fmt = (
+        "Unknown parameter '{name}' for operation {operation}. Must be one "
+        "of: {choices}"
+    )
+
+
+class InvalidRegionError(ValidationError, ValueError):
+    """
+    Invalid region_name provided to client or resource.
+
+    :ivar region_name: region_name that was being validated.
+    """
+
+    fmt = "Provided region_name '{region_name}' doesn't match a supported format."
+
+
+class AliasConflictParameterError(ValidationError):
+    """
+    Error when an alias is provided for a parameter as well as the original.
+ + :ivar original: The name of the original parameter. + :ivar alias: The name of the alias + :ivar operation: The name of the operation. + """ + + fmt = ( + "Parameter '{original}' and its alias '{alias}' were provided " + "for operation {operation}. Only one of them may be used." + ) + + +class UnknownServiceStyle(BotoCoreError): + """ + Unknown style of service invocation. + + :ivar service_style: The style requested. + """ + + fmt = 'The service style ({service_style}) is not understood.' + + +class PaginationError(BotoCoreError): + fmt = 'Error during pagination: {message}' + + +class OperationNotPageableError(BotoCoreError): + fmt = 'Operation cannot be paginated: {operation_name}' + + +class ChecksumError(BotoCoreError): + """The expected checksum did not match the calculated checksum.""" + + fmt = ( + 'Checksum {checksum_type} failed, expected checksum ' + '{expected_checksum} did not match calculated checksum ' + '{actual_checksum}.' + ) + + +class UnseekableStreamError(BotoCoreError): + """Need to seek a stream, but stream does not support seeking.""" + + fmt = ( + 'Need to rewind the stream {stream_object}, but stream ' + 'is not seekable.' + ) + + +class WaiterError(BotoCoreError): + """Waiter failed to reach desired state.""" + + fmt = 'Waiter {name} failed: {reason}' + + def __init__(self, name, reason, last_response): + super().__init__(name=name, reason=reason) + self.last_response = last_response + + +class IncompleteReadError(BotoCoreError): + """HTTP response did not return expected number of bytes.""" + + fmt = '{actual_bytes} read, but total bytes expected is {expected_bytes}.' + + +class InvalidExpressionError(BotoCoreError): + """Expression is either invalid or too complex.""" + + fmt = 'Invalid expression {expression}: Only dotted lookups are supported.' + + +class UnknownCredentialError(BotoCoreError): + """Tried to insert before/after an unregistered credential type.""" + + fmt = 'Credential named {name} not found.' 
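+
+
+# Illustrative sketch, not part of botocore: the ``__reduce__`` methods above
+# exist so that kwargs-only exceptions survive pickling (for example, when
+# raised inside a multiprocessing worker). The helper below is hypothetical
+# and only demonstrates that round trip; it is never called by botocore.
+def _demo_exception_pickle_round_trip():
+    import pickle
+
+    err = DataNotFoundError(data_path='foo/bar')
+    restored = pickle.loads(pickle.dumps(err))
+    # ``_exception_from_packed_args`` re-invokes the class with the original
+    # kwargs, so the formatted message is preserved across the round trip.
+    assert restored.kwargs == {'data_path': 'foo/bar'}
+    assert str(restored) == 'Unable to load data for: foo/bar'
+    return restored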
+ + +class WaiterConfigError(BotoCoreError): + """Error when processing waiter configuration.""" + + fmt = 'Error processing waiter config: {error_msg}' + + +class UnknownClientMethodError(BotoCoreError): + """Error when trying to access a method on a client that does not exist.""" + + fmt = 'Client does not have method: {method_name}' + + +class UnsupportedSignatureVersionError(BotoCoreError): + """Error when trying to use an unsupported Signature Version.""" + + fmt = 'Signature version(s) are not supported: {signature_version}' + + +class ClientError(Exception): + MSG_TEMPLATE = ( + 'An error occurred ({error_code}) when calling the {operation_name} ' + 'operation{retry_info}: {error_message}' + ) + + def __init__(self, error_response, operation_name): + retry_info = self._get_retry_info(error_response) + error = error_response.get('Error', {}) + msg = self.MSG_TEMPLATE.format( + error_code=error.get('Code', 'Unknown'), + error_message=error.get('Message', 'Unknown'), + operation_name=operation_name, + retry_info=retry_info, + ) + super().__init__(msg) + self.response = error_response + self.operation_name = operation_name + + def _get_retry_info(self, response): + retry_info = '' + if 'ResponseMetadata' in response: + metadata = response['ResponseMetadata'] + if metadata.get('MaxAttemptsReached', False): + if 'RetryAttempts' in metadata: + retry_info = ( + f" (reached max retries: {metadata['RetryAttempts']})" + ) + return retry_info + + def __reduce__(self): + # Subclasses of ClientError's are dynamically generated and + # cannot be pickled unless they are attributes of a + # module. So at the very least return a ClientError back. + return ClientError, (self.response, self.operation_name) + + +class EventStreamError(ClientError): + pass + + +class UnsupportedTLSVersionWarning(Warning): + """Warn when an openssl version that uses TLS 1.2 is required""" + + pass + + +class ImminentRemovalWarning(Warning): + pass + + +class InvalidDNSNameError(BotoCoreError): + """Error when virtual host path is forced on a non-DNS compatible bucket""" + + fmt = ( + 'Bucket named {bucket_name} is not DNS compatible. Virtual ' + 'hosted-style addressing cannot be used. The addressing style ' + 'can be configured by removing the addressing_style value ' + 'or setting that value to \'path\' or \'auto\' in the AWS Config ' + 'file or in the botocore.client.Config object.' + ) + + +class InvalidS3AddressingStyleError(BotoCoreError): + """Error when an invalid path style is specified""" + + fmt = ( + 'S3 addressing style {s3_addressing_style} is invalid. Valid options ' + 'are: \'auto\', \'virtual\', and \'path\'' + ) + + +class UnsupportedS3ArnError(BotoCoreError): + """Error when S3 ARN provided to Bucket parameter is not supported""" + + fmt = ( + 'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only ' + 'ARNs for S3 access-points are supported.' + ) + + +class UnsupportedS3ControlArnError(BotoCoreError): + """Error when S3 ARN provided to S3 control parameter is not supported""" + + fmt = 'S3 ARN "{arn}" provided is invalid for this operation. {msg}' + + +class InvalidHostLabelError(BotoCoreError): + """Error when an invalid host label would be bound to an endpoint""" + + fmt = ( + 'Invalid host label to be bound to the hostname of the endpoint: ' + '"{label}".' 
+ ) + + +class UnsupportedOutpostResourceError(BotoCoreError): + """Error when S3 Outpost ARN provided to Bucket parameter is incomplete""" + + fmt = ( + 'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" ' + 'parameter is invalid. Only ARNs for S3 Outpost arns with an ' + 'access-point sub-resource are supported.' + ) + + +class UnsupportedS3ConfigurationError(BotoCoreError): + """Error when an unsupported configuration is used with access-points""" + + fmt = 'Unsupported configuration when using S3: {msg}' + + +class UnsupportedS3AccesspointConfigurationError(BotoCoreError): + """Error when an unsupported configuration is used with access-points""" + + fmt = 'Unsupported configuration when using S3 access-points: {msg}' + + +class InvalidEndpointDiscoveryConfigurationError(BotoCoreError): + """Error when invalid value supplied for endpoint_discovery_enabled""" + + fmt = ( + 'Unsupported configuration value for endpoint_discovery_enabled. ' + 'Expected one of ("true", "false", "auto") but got {config_value}.' + ) + + +class UnsupportedS3ControlConfigurationError(BotoCoreError): + """Error when an unsupported configuration is used with S3 Control""" + + fmt = 'Unsupported configuration when using S3 Control: {msg}' + + +class InvalidRetryConfigurationError(BotoCoreError): + """Error when invalid retry configuration is specified""" + + fmt = ( + 'Cannot provide retry configuration for "{retry_config_option}". ' + 'Valid retry configuration options are: {valid_options}' + ) + + +class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): + """Error when invalid retry configuration is specified""" + + fmt = ( + 'Value provided to "max_attempts": {provided_max_attempts} must ' + 'be an integer greater than or equal to {min_value}.' + ) + + +class InvalidRetryModeError(InvalidRetryConfigurationError): + """Error when invalid retry mode configuration is specified""" + + fmt = ( + 'Invalid value provided to "mode": "{provided_retry_mode}" must ' + 'be one of: {valid_modes}' + ) + + +class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError): + """Error for invalid s3 us-east-1 regional endpoints configuration""" + + fmt = ( + 'S3 us-east-1 regional endpoint option ' + '{s3_us_east_1_regional_endpoint_config} is ' + 'invalid. Valid options are: "legacy", "regional"' + ) + + +class InvalidSTSRegionalEndpointsConfigError(BotoCoreError): + """Error when invalid sts regional endpoints configuration is specified""" + + fmt = ( + 'STS regional endpoints option {sts_regional_endpoints_config} is ' + 'invalid. Valid options are: "legacy", "regional"' + ) + + +class StubResponseError(BotoCoreError): + fmt = ( + 'Error getting response stub for operation {operation_name}: {reason}' + ) + + +class StubAssertionError(StubResponseError, AssertionError): + pass + + +class UnStubbedResponseError(StubResponseError): + pass + + +class InvalidConfigError(BotoCoreError): + fmt = '{error_msg}' + + +class InfiniteLoopConfigError(InvalidConfigError): + fmt = ( + 'Infinite loop in credential configuration detected. Attempting to ' + 'load from profile {source_profile} which has already been visited. ' + 'Visited profiles: {visited_profiles}' + ) + + +class RefreshWithMFAUnsupportedError(BotoCoreError): + fmt = 'Cannot refresh credentials: MFA token required.' + + +class MD5UnavailableError(BotoCoreError): + fmt = "This system does not support MD5 generation." 
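+
+
+# Hedged example, not botocore code: the retry-configuration errors above
+# form a small hierarchy, so callers can catch the
+# ``InvalidRetryConfigurationError`` base class to handle any retry
+# misconfiguration. This demo function is hypothetical and unused.
+def _demo_catch_retry_config_error():
+    try:
+        raise InvalidRetryModeError(
+            provided_retry_mode='exponential',
+            valid_modes=('legacy', 'standard', 'adaptive'),
+        )
+    except InvalidRetryConfigurationError as exc:
+        # ``fmt`` is rendered with the keyword arguments passed to the
+        # constructor, i.e. the invalid mode and the accepted modes.
+        return str(exc)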
+ + +class MissingDependencyException(BotoCoreError): + fmt = "Missing Dependency: {msg}" + + +class MetadataRetrievalError(BotoCoreError): + fmt = "Error retrieving metadata: {error_msg}" + + +class UndefinedModelAttributeError(Exception): + pass + + +class MissingServiceIdError(UndefinedModelAttributeError): + fmt = ( + "The model being used for the service {service_name} is missing the " + "serviceId metadata property, which is required." + ) + + def __init__(self, **kwargs): + msg = self.fmt.format(**kwargs) + Exception.__init__(self, msg) + self.kwargs = kwargs + + +class SSOError(BotoCoreError): + fmt = ( + "An unspecified error happened when resolving AWS credentials or an " + "access token from SSO." + ) + + +class SSOTokenLoadError(SSOError): + fmt = "Error loading SSO Token: {error_msg}" + + +class UnauthorizedSSOTokenError(SSOError): + fmt = ( + "The SSO session associated with this profile has expired or is " + "otherwise invalid. To refresh this SSO session run aws sso login " + "with the corresponding profile." + ) + + +class LoginError(BotoCoreError): + fmt = ( + "An unspecified error happened when resolving AWS credentials or " + "refreshing a login session profile." + ) + + +class LoginRefreshRequired(LoginError): + fmt = "Your session has expired or credentials have changed. Please reauthenticate using 'aws login'." + + +class LoginInsufficientPermissions(LoginError): + fmt = ( + "Unable to create or refresh login credentials due to insufficient " + "permissions. You may be missing permission for the 'signin:CreateOAuth2Token' action." + ) + + +class LoginTokenLoadError(LoginError): + fmt = "Error loading login session token: {error_msg}" + + +class LoginAuthorizationCodeError(LoginError): + fmt = "Error loading or redeeming a login authorization code: {error_msg} " + + +class CapacityNotAvailableError(BotoCoreError): + fmt = 'Insufficient request capacity available.' + + +class InvalidProxiesConfigError(BotoCoreError): + fmt = 'Invalid configuration value(s) provided for proxies_config.' + + +class InvalidDefaultsMode(BotoCoreError): + fmt = ( + 'Client configured with invalid defaults mode: {mode}. ' + 'Valid defaults modes include: {valid_modes}.' + ) + + +class AwsChunkedWrapperError(BotoCoreError): + fmt = '{error_msg}' + + +class FlexibleChecksumError(BotoCoreError): + fmt = '{error_msg}' + + +class InvalidEndpointConfigurationError(BotoCoreError): + fmt = 'Invalid endpoint configuration: {msg}' + + +class EndpointProviderError(BotoCoreError): + """Base error for the EndpointProvider class""" + + fmt = '{msg}' + + +class EndpointResolutionError(EndpointProviderError): + """Error when input parameters resolve to an error rule""" + + fmt = '{msg}' + + +class UnknownEndpointResolutionBuiltInName(EndpointProviderError): + fmt = 'Unknown builtin variable name: {name}' + + +class InvalidChecksumConfigError(BotoCoreError): + """Error when an invalid checksum config value is supplied.""" + + fmt = ( + 'Unsupported configuration value for {config_key}. ' + 'Expected one of {valid_options} but got {config_value}.' + ) + + +class UnsupportedServiceProtocolsError(BotoCoreError): + """Error when a service does not use any protocol supported by botocore.""" + + fmt = ( + 'Botocore supports {botocore_supported_protocols}, but service {service} only ' + 'supports {service_supported_protocols}.' 
+ ) diff --git a/py311/lib/python3.11/site-packages/botocore/handlers.py b/py311/lib/python3.11/site-packages/botocore/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..65f12db705f96720c3a41cd24a05920d9e7c270d --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/handlers.py @@ -0,0 +1,1727 @@ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +"""Builtin event handlers. + +This module contains builtin handlers for events emitted by botocore. +""" + +import base64 +import copy +import logging +import os +import re +import uuid +import warnings +from io import BytesIO + +import botocore +import botocore.auth +from botocore import ( + retryhandler, # noqa: F401 + translate, # noqa: F401 + utils, +) +from botocore.args import ClientConfigString +from botocore.compat import ( + MD5_AVAILABLE, # noqa: F401 + ETree, + OrderedDict, + XMLParseError, + ensure_bytes, + get_md5, + json, + quote, + unquote, + unquote_str, + urlsplit, + urlunsplit, +) +from botocore.docs.utils import ( + AppendParamDocumentation, + AutoPopulatedParam, + HideParamFromOperations, +) +from botocore.endpoint_provider import VALID_HOST_LABEL_RE +from botocore.exceptions import ( + AliasConflictParameterError, + MissingServiceIdError, # noqa: F401 + ParamValidationError, + UnsupportedTLSVersionWarning, +) +from botocore.regions import EndpointResolverBuiltins +from botocore.serialize import TIMESTAMP_PRECISION_MILLISECOND +from botocore.signers import ( + add_dsql_generate_db_auth_token_methods, + add_generate_db_auth_token, + add_generate_presigned_post, + add_generate_presigned_url, +) +from botocore.useragent import register_feature_id +from botocore.utils import ( + SAFE_CHARS, + SERVICE_NAME_ALIASES, # noqa: F401 + ArnParser, + get_token_from_environment, + hyphenize_service_id, # noqa: F401 + is_global_accesspoint, # noqa: F401 + percent_encode, + switch_host_with_param, +) + +logger = logging.getLogger(__name__) + +REGISTER_FIRST = object() +REGISTER_LAST = object() +# From the S3 docs: +# The rules for bucket names in the US Standard region allow bucket names +# to be as long as 255 characters, and bucket names can contain any +# combination of uppercase letters, lowercase letters, numbers, periods +# (.), hyphens (-), and underscores (_). 
+VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
+_ACCESSPOINT_ARN = (
+    r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]*:[0-9]{12}:accesspoint[/:]'
+    r'[a-zA-Z0-9\-.]{1,63}$'
+)
+_OUTPOST_ARN = (
+    r'^arn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]'
+    r'[a-zA-Z0-9\-]{1,63}[/:]accesspoint[/:][a-zA-Z0-9\-]{1,63}$'
+)
+VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN]))
+# signing names used for the services s3 and s3-control, for example in
+# botocore/data/s3/2006-03-01/endpoints-rule-set-1.json
+S3_SIGNING_NAMES = ('s3', 's3-outposts', 's3-object-lambda', 's3express')
+VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
+
+
+def handle_service_name_alias(service_name, **kwargs):
+    return SERVICE_NAME_ALIASES.get(service_name, service_name)
+
+
+def add_recursion_detection_header(params, **kwargs):
+    has_lambda_name = 'AWS_LAMBDA_FUNCTION_NAME' in os.environ
+    trace_id = os.environ.get('_X_AMZN_TRACE_ID')
+    if has_lambda_name and trace_id:
+        headers = params['headers']
+        if 'X-Amzn-Trace-Id' not in headers:
+            headers['X-Amzn-Trace-Id'] = quote(trace_id, safe='-=;:+&[]{}"\',')
+
+
+def escape_xml_payload(params, **kwargs):
+    # Replace \r and \n with the escaped sequence over the whole XML document
+    # to avoid linebreak normalization modifying customer input when the
+    # document is parsed. Ideally, we would do this in ElementTree.tostring,
+    # but it doesn't allow us to override entity escaping for text fields. For
+    # this operation \r and \n can only appear in the XML document if they were
+    # passed as part of the customer input.
+    body = params['body']
+    if b'\r' in body:
+        body = body.replace(b'\r', b'&#xD;')
+    if b'\n' in body:
+        body = body.replace(b'\n', b'&#xA;')
+
+    params['body'] = body
+
+
+def check_for_200_error(response, **kwargs):
+    """This function has been deprecated, but is kept for backwards compatibility."""
+    # From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
+    # There are two opportunities for a copy request to return an error. One
+    # can occur when Amazon S3 receives the copy request and the other can
+    # occur while Amazon S3 is copying the files. If the error occurs before
+    # the copy operation starts, you receive a standard Amazon S3 error. If the
+    # error occurs during the copy operation, the error response is embedded in
+    # the 200 OK response. This means that a 200 OK response can contain either
+    # a success or an error. Make sure to design your application to parse the
+    # contents of the response and handle it appropriately.
+    #
+    # So this handler checks for this case. Even though the server sends a
+    # 200 response, conceptually this should be handled exactly like a
+    # 500 response (with respect to raising exceptions, retries, etc.)
+    # We're connected *before* all the other retry logic handlers, so as long
+    # as we switch the error code to 500, we'll retry the error as expected.
+    if response is None:
+        # A None response can happen if an exception is raised while
+        # trying to retrieve the response. See Endpoint._get_response().
+ return + http_response, parsed = response + if _looks_like_special_case_error( + http_response.status_code, http_response.content + ): + logger.debug( + "Error found for response with 200 status code, " + "errors: %s, changing status code to " + "500.", + parsed, + ) + http_response.status_code = 500 + + +def _looks_like_special_case_error(status_code, body): + if status_code == 200 and body: + try: + parser = ETree.XMLParser( + target=ETree.TreeBuilder(), encoding='utf-8' + ) + parser.feed(body) + root = parser.close() + except XMLParseError: + # In cases of network disruptions, we may end up with a partial + # streamed response from S3. We need to treat these cases as + # 500 Service Errors and try again. + return True + if root.tag == 'Error': + return True + return False + + +def set_operation_specific_signer(context, signing_name, **kwargs): + """Choose the operation-specific signer. + + Individual operations may have a different auth type than the service as a + whole. This will most often manifest as operations that should not be + authenticated at all, but can include other auth modes such as sigv4 + without body signing. + """ + auth_type = context.get('auth_type') + + # Auth type will be None if the operation doesn't have a configured auth + # type. + if not auth_type: + return + + # Auth type will be the string value 'none' if the operation should not + # be signed at all. + if auth_type == 'none': + return botocore.UNSIGNED + + if auth_type == 'bearer': + return 'bearer' + + # If the operation needs an unsigned body, we set additional context + # allowing the signer to be aware of this. + if context.get('unsigned_payload') or auth_type == 'v4-unsigned-body': + context['payload_signing_enabled'] = False + + if auth_type.startswith('v4'): + if auth_type == 'v4-s3express': + return auth_type + + if auth_type == 'v4a': + # If sigv4a is chosen, we must add additional signing config for + # global signature. + region = _resolve_sigv4a_region(context) + signing = {'region': region, 'signing_name': signing_name} + if 'signing' in context: + context['signing'].update(signing) + else: + context['signing'] = signing + signature_version = 'v4a' + else: + signature_version = 'v4' + + # Signing names used by s3 and s3-control use customized signers "s3v4" + # and "s3v4a". + if signing_name in S3_SIGNING_NAMES: + signature_version = f's3{signature_version}' + + return signature_version + + +def _handle_sqs_compatible_error(parsed, context, **kwargs): + """ + Ensures backward compatibility for SQS errors. + + SQS's migration from the Query protocol to JSON was done prior to SDKs allowing a + service to support multiple protocols. Because of this, SQS is missing the "error" + key from its modeled exceptions, which is used by most query compatible services + to map error codes to the proper exception. Instead, SQS uses the error's shape name, + which is preserved in the QueryErrorCode key. 
+ """ + parsed_error = parsed.get("Error", {}) + if not parsed_error: + return + + if query_code := parsed_error.get("QueryErrorCode"): + context['error_code_override'] = query_code + + +def _resolve_sigv4a_region(context): + region = None + if 'client_config' in context: + region = context['client_config'].sigv4a_signing_region_set + if not region and context.get('signing', {}).get('region'): + region = context['signing']['region'] + return region or '*' + + +def decode_console_output(parsed, **kwargs): + if 'Output' in parsed: + try: + # We're using 'replace' for errors because it is + # possible that console output contains non string + # chars we can't utf-8 decode. + value = base64.b64decode( + bytes(parsed['Output'], 'latin-1') + ).decode('utf-8', 'replace') + parsed['Output'] = value + except (ValueError, TypeError, AttributeError): + logger.debug('Error decoding base64', exc_info=True) + + +def generate_idempotent_uuid(params, model, **kwargs): + for name in model.idempotent_members: + if name not in params: + params[name] = str(uuid.uuid4()) + logger.debug( + "injecting idempotency token (%s) into param '%s'.", + params[name], + name, + ) + + +def decode_quoted_jsondoc(value): + try: + value = json.loads(unquote(value)) + except (ValueError, TypeError): + logger.debug('Error loading quoted JSON', exc_info=True) + return value + + +def json_decode_template_body(parsed, **kwargs): + if 'TemplateBody' in parsed: + try: + value = json.loads( + parsed['TemplateBody'], object_pairs_hook=OrderedDict + ) + parsed['TemplateBody'] = value + except (ValueError, TypeError): + logger.debug('error loading JSON', exc_info=True) + + +def validate_bucket_name(params, **kwargs): + if 'Bucket' not in params: + return + bucket = params['Bucket'] + if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket): + error_msg = ( + f'Invalid bucket name "{bucket}": Bucket name must match ' + f'the regex "{VALID_BUCKET.pattern}" or be an ARN matching ' + f'the regex "{VALID_S3_ARN.pattern}"' + ) + raise ParamValidationError(report=error_msg) + + +def sse_md5(params, **kwargs): + """ + S3 server-side encryption requires the encryption key to be sent to the + server base64 encoded, as well as a base64-encoded MD5 hash of the + encryption key. This handler does both if the MD5 has not been set by + the caller. + """ + _sse_md5(params, 'SSECustomer') + + +def copy_source_sse_md5(params, **kwargs): + """ + S3 server-side encryption requires the encryption key to be sent to the + server base64 encoded, as well as a base64-encoded MD5 hash of the + encryption key. This handler does both if the MD5 has not been set by + the caller specifically if the parameter is for the copy-source sse-c key. 
+ """ + _sse_md5(params, 'CopySourceSSECustomer') + + +def _sse_md5(params, sse_member_prefix='SSECustomer'): + if not _needs_s3_sse_customization(params, sse_member_prefix): + return + + sse_key_member = sse_member_prefix + 'Key' + sse_md5_member = sse_member_prefix + 'KeyMD5' + key_as_bytes = params[sse_key_member] + if isinstance(key_as_bytes, str): + key_as_bytes = key_as_bytes.encode('utf-8') + md5_val = get_md5(key_as_bytes, usedforsecurity=False).digest() + key_md5_str = base64.b64encode(md5_val).decode('utf-8') + key_b64_encoded = base64.b64encode(key_as_bytes).decode('utf-8') + params[sse_key_member] = key_b64_encoded + params[sse_md5_member] = key_md5_str + + +def _needs_s3_sse_customization(params, sse_member_prefix): + return ( + params.get(sse_member_prefix + 'Key') is not None + and sse_member_prefix + 'KeyMD5' not in params + ) + + +def disable_signing(**kwargs): + """ + This handler disables request signing by setting the signer + name to a special sentinel value. + """ + return botocore.UNSIGNED + + +def add_expect_header(model, params, **kwargs): + if model.http.get('method', '') not in ['PUT', 'POST']: + return + if 'body' in params: + body = params['body'] + if hasattr(body, 'read'): + check_body = utils.ensure_boolean( + os.environ.get( + 'BOTO_EXPERIMENTAL__NO_EMPTY_CONTINUE', + False, + ) + ) + if check_body and utils.determine_content_length(body) == 0: + return + # Any file like object will use an expect 100-continue + # header regardless of size. + logger.debug("Adding expect 100 continue header to request.") + params['headers']['Expect'] = '100-continue' + + +class DeprecatedServiceDocumenter: + def __init__(self, replacement_service_name): + self._replacement_service_name = replacement_service_name + + def inject_deprecation_notice(self, section, event_name, **kwargs): + section.style.start_important() + section.write('This service client is deprecated. Please use ') + section.style.ref( + self._replacement_service_name, + self._replacement_service_name, + ) + section.write(' instead.') + section.style.end_important() + + +def document_copy_source_form(section, event_name, **kwargs): + if 'request-example' in event_name: + parent = section.get_section('structure-value') + param_line = parent.get_section('CopySource') + value_portion = param_line.get_section('member-value') + value_portion.clear_text() + value_portion.write( + "'string' or {'Bucket': 'string', " + "'Key': 'string', 'VersionId': 'string'}" + ) + elif 'request-params' in event_name: + param_section = section.get_section('CopySource') + type_section = param_section.get_section('param-type') + type_section.clear_text() + type_section.write(':type CopySource: str or dict') + doc_section = param_section.get_section('param-documentation') + doc_section.clear_text() + doc_section.write( + "The name of the source bucket, key name of the source object, " + "and optional version ID of the source object. You can either " + "provide this value as a string or a dictionary. The " + "string form is {bucket}/{key} or " + "{bucket}/{key}?versionId={versionId} if you want to copy a " + "specific version. You can also provide this value as a " + "dictionary. The dictionary format is recommended over " + "the string format because it is more explicit. The dictionary " + "format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}." + " Note that the VersionId key is optional and may be omitted." 
+ " To specify an S3 access point, provide the access point" + " ARN for the ``Bucket`` key in the copy source dictionary. If you" + " want to provide the copy source for an S3 access point as a" + " string instead of a dictionary, the ARN provided must be the" + " full S3 access point object ARN" + " (i.e. {accesspoint_arn}/object/{key})" + ) + + +def handle_copy_source_param(params, **kwargs): + """Convert CopySource param for CopyObject/UploadPartCopy. + + This handler will deal with two cases: + + * CopySource provided as a string. We'll make a best effort + to URL encode the key name as required. This will require + parsing the bucket and version id from the CopySource value + and only encoding the key. + * CopySource provided as a dict. In this case we're + explicitly given the Bucket, Key, and VersionId so we're + able to encode the key and ensure this value is serialized + and correctly sent to S3. + + """ + source = params.get('CopySource') + if source is None: + # The call will eventually fail but we'll let the + # param validator take care of this. It will + # give a better error message. + return + if isinstance(source, str): + params['CopySource'] = _quote_source_header(source) + elif isinstance(source, dict): + params['CopySource'] = _quote_source_header_from_dict(source) + + +def _quote_source_header_from_dict(source_dict): + try: + bucket = source_dict['Bucket'] + key = source_dict['Key'] + version_id = source_dict.get('VersionId') + if VALID_S3_ARN.search(bucket): + final = f'{bucket}/object/{key}' + else: + final = f'{bucket}/{key}' + except KeyError as e: + raise ParamValidationError( + report=f'Missing required parameter: {str(e)}' + ) + final = percent_encode(final, safe=SAFE_CHARS + '/') + if version_id is not None: + final += f'?versionId={version_id}' + return final + + +def _quote_source_header(value): + result = VERSION_ID_SUFFIX.search(value) + if result is None: + return percent_encode(value, safe=SAFE_CHARS + '/') + else: + first, version_id = value[: result.start()], value[result.start() :] + return percent_encode(first, safe=SAFE_CHARS + '/') + version_id + + +def _get_cross_region_presigned_url( + request_signer, request_dict, model, source_region, destination_region +): + # The better way to do this is to actually get the + # endpoint_resolver and get the endpoint_url given the + # source region. In this specific case, we know that + # we can safely replace the dest region with the source + # region because of the supported EC2 regions, but in + # general this is not a safe assumption to make. + # I think eventually we should try to plumb through something + # that allows us to resolve endpoints from regions. + request_dict_copy = copy.deepcopy(request_dict) + request_dict_copy['body']['DestinationRegion'] = destination_region + request_dict_copy['url'] = request_dict['url'].replace( + destination_region, source_region + ) + request_dict_copy['method'] = 'GET' + request_dict_copy['headers'] = {} + return request_signer.generate_presigned_url( + request_dict_copy, region_name=source_region, operation_name=model.name + ) + + +def _get_presigned_url_source_and_destination_regions(request_signer, params): + # Gets the source and destination regions to be used + destination_region = request_signer._region_name + source_region = params.get('SourceRegion') + return source_region, destination_region + + +def inject_presigned_url_ec2(params, request_signer, model, **kwargs): + # The customer can still provide this, so we should pass if they do. 
+ if 'PresignedUrl' in params['body']: + return + src, dest = _get_presigned_url_source_and_destination_regions( + request_signer, params['body'] + ) + url = _get_cross_region_presigned_url( + request_signer, params, model, src, dest + ) + params['body']['PresignedUrl'] = url + # EC2 Requires that the destination region be sent over the wire in + # addition to the source region. + params['body']['DestinationRegion'] = dest + + +def inject_presigned_url_rds(params, request_signer, model, **kwargs): + # SourceRegion is not required for RDS operations, so it's possible that + # it isn't set. In that case it's probably a local copy so we don't need + # to do anything else. + if 'SourceRegion' not in params['body']: + return + + src, dest = _get_presigned_url_source_and_destination_regions( + request_signer, params['body'] + ) + + # Since SourceRegion isn't actually modeled for RDS, it needs to be + # removed from the request params before we send the actual request. + del params['body']['SourceRegion'] + + if 'PreSignedUrl' in params['body']: + return + + url = _get_cross_region_presigned_url( + request_signer, params, model, src, dest + ) + params['body']['PreSignedUrl'] = url + + +def json_decode_policies(parsed, model, **kwargs): + # Any time an IAM operation returns a policy document + # it is a string that is json that has been urlencoded, + # i.e urlencode(json.dumps(policy_document)). + # To give users something more useful, we will urldecode + # this value and json.loads() the result so that they have + # the policy document as a dictionary. + output_shape = model.output_shape + if output_shape is not None: + _decode_policy_types(parsed, model.output_shape) + + +def _decode_policy_types(parsed, shape): + # IAM consistently uses the policyDocumentType shape to indicate + # strings that have policy documents. + shape_name = 'policyDocumentType' + if shape.type_name == 'structure': + for member_name, member_shape in shape.members.items(): + if ( + member_shape.type_name == 'string' + and member_shape.name == shape_name + and member_name in parsed + ): + parsed[member_name] = decode_quoted_jsondoc( + parsed[member_name] + ) + elif member_name in parsed: + _decode_policy_types(parsed[member_name], member_shape) + if shape.type_name == 'list': + shape_member = shape.member + for item in parsed: + _decode_policy_types(item, shape_member) + + +def parse_get_bucket_location(parsed, http_response, **kwargs): + # s3.GetBucketLocation cannot be modeled properly. To + # account for this we just manually parse the XML document. + # The "parsed" passed in only has the ResponseMetadata + # filled out. This handler will fill in the LocationConstraint + # value. + if http_response.raw is None: + return + response_body = http_response.content + parser = ETree.XMLParser(target=ETree.TreeBuilder(), encoding='utf-8') + parser.feed(response_body) + root = parser.close() + region = root.text + parsed['LocationConstraint'] = region + + +def base64_encode_user_data(params, **kwargs): + if 'UserData' in params: + if isinstance(params['UserData'], str): + # Encode it to bytes if it is text. + params['UserData'] = params['UserData'].encode('utf-8') + params['UserData'] = base64.b64encode(params['UserData']).decode( + 'utf-8' + ) + + +def document_base64_encoding(param): + description = ( + '**This value will be base64 encoded automatically. 
Do ' + 'not base64 encode this value prior to performing the ' + 'operation.**' + ) + append = AppendParamDocumentation(param, description) + return append.append_documentation + + +def validate_ascii_metadata(params, **kwargs): + """Verify S3 Metadata only contains ascii characters. + + From: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + + "Amazon S3 stores user-defined metadata in lowercase. Each name, value pair + must conform to US-ASCII when using REST and UTF-8 when using SOAP or + browser-based uploads via POST." + + """ + metadata = params.get('Metadata') + if not metadata or not isinstance(metadata, dict): + # We have to at least type check the metadata as a dict type + # because this handler is called before param validation. + # We'll go ahead and return because the param validator will + # give a descriptive error message for us. + # We might need a post-param validation event. + return + for key, value in metadata.items(): + try: + key.encode('ascii') + value.encode('ascii') + except UnicodeEncodeError: + error_msg = ( + 'Non ascii characters found in S3 metadata ' + f'for key "{key}", value: "{value}". \nS3 metadata can only ' + 'contain ASCII characters. ' + ) + raise ParamValidationError(report=error_msg) + + +def fix_route53_ids(params, model, **kwargs): + """ + Check for and split apart Route53 resource IDs, setting + only the last piece. This allows the output of one operation + (e.g. ``'foo/1234'``) to be used as input in another + operation (e.g. it expects just ``'1234'``). + """ + input_shape = model.input_shape + if not input_shape or not hasattr(input_shape, 'members'): + return + + members = [ + name + for (name, shape) in input_shape.members.items() + if shape.name in ['ResourceId', 'DelegationSetId', 'ChangeId'] + ] + + for name in members: + if name in params: + orig_value = params[name] + params[name] = orig_value.split('/')[-1] + logger.debug('%s %s -> %s', name, orig_value, params[name]) + + +def inject_account_id(params, **kwargs): + if params.get('accountId') is None: + # Glacier requires accountId, but allows you + # to specify '-' for the current owners account. + # We add this default value if the user does not + # provide the accountId as a convenience. + params['accountId'] = '-' + + +def add_glacier_version(model, params, **kwargs): + request_dict = params + request_dict['headers']['x-amz-glacier-version'] = model.metadata[ + 'apiVersion' + ] + + +def add_accept_header(model, params, **kwargs): + if params['headers'].get('Accept', None) is None: + request_dict = params + request_dict['headers']['Accept'] = 'application/json' + + +def add_glacier_checksums(params, **kwargs): + """Add glacier checksums to the http request. + + This will add two headers to the http request: + + * x-amz-content-sha256 + * x-amz-sha256-tree-hash + + These values will only be added if they are not present + in the HTTP request. + + """ + request_dict = params + headers = request_dict['headers'] + body = request_dict['body'] + if isinstance(body, bytes): + # If the user provided a bytes type instead of a file + # like object, we're temporarily create a BytesIO object + # so we can use the util functions to calculate the + # checksums which assume file like objects. Note that + # we're not actually changing the body in the request_dict. 
+        body = BytesIO(body)
+    starting_position = body.tell()
+    if 'x-amz-content-sha256' not in headers:
+        headers['x-amz-content-sha256'] = utils.calculate_sha256(
+            body, as_hex=True
+        )
+    body.seek(starting_position)
+    if 'x-amz-sha256-tree-hash' not in headers:
+        headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body)
+    body.seek(starting_position)
+
+
+def document_glacier_tree_hash_checksum():
+    doc = '''
+        This is a required field.
+
+        Ideally you will want to compute this value with checksums from
+        previous uploaded parts, using the algorithm described in
+        `Glacier documentation <http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html>`_.
+
+        But if you prefer, you can also use botocore.utils.calculate_tree_hash()
+        to compute it from raw file by::
+
+            checksum = calculate_tree_hash(open('your_file.txt', 'rb'))
+
+        '''
+    return AppendParamDocumentation('checksum', doc).append_documentation
+
+
+def document_cloudformation_get_template_return_type(
+    section, event_name, **kwargs
+):
+    if 'response-params' in event_name:
+        template_body_section = section.get_section('TemplateBody')
+        type_section = template_body_section.get_section('param-type')
+        type_section.clear_text()
+        type_section.write('(*dict*) --')
+    elif 'response-example' in event_name:
+        parent = section.get_section('structure-value')
+        param_line = parent.get_section('TemplateBody')
+        value_portion = param_line.get_section('member-value')
+        value_portion.clear_text()
+        value_portion.write('{}')
+
+
+def switch_host_machinelearning(request, **kwargs):
+    switch_host_with_param(request, 'PredictEndpoint')
+
+
+def check_openssl_supports_tls_version_1_2(**kwargs):
+    import ssl
+
+    try:
+        openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
+        if openssl_version_tuple < (1, 0, 1):
+            warnings.warn(
+                f'Currently installed openssl version: {ssl.OPENSSL_VERSION} does not '
+                'support TLS 1.2, which is required for use of iot-data. '
+                'Please use python installed with openssl version 1.0.1 or '
+                'higher.',
+                UnsupportedTLSVersionWarning,
+            )
+    # We cannot check the openssl version on python2.6, so we should just
+    # pass on this convenience check.
+    except AttributeError:
+        pass
+
+
+def change_get_to_post(request, **kwargs):
+    # This is useful when we need to change a potentially large GET request
+    # into a POST with x-www-form-urlencoded encoding.
+    if request.method == 'GET' and '?' in request.url:
+        request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
+        request.method = 'POST'
+        request.url, request.data = request.url.split('?', 1)
+
+
+def set_list_objects_encoding_type_url(params, context, **kwargs):
+    if 'EncodingType' not in params:
+        # We set this context so that we know it wasn't the customer that
+        # requested the encoding.
+        context['encoding_type_auto_set'] = True
+        params['EncodingType'] = 'url'
+
+
+def decode_list_object(parsed, context, **kwargs):
+    # This is needed because we are passing url as the encoding type. Since the
+    # paginator is based on the key, we need to handle it before it can be
+    # round tripped.
+    #
+    # From the documentation: If you specify encoding-type request parameter,
+    # Amazon S3 includes this element in the response, and returns encoded key
+    # name values in the following response elements:
+    # Delimiter, Marker, Prefix, NextMarker, Key.
+ _decode_list_object( + top_level_keys=['Delimiter', 'Marker', 'NextMarker'], + nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], + parsed=parsed, + context=context, + ) + + +def decode_list_object_v2(parsed, context, **kwargs): + # From the documentation: If you specify encoding-type request parameter, + # Amazon S3 includes this element in the response, and returns encoded key + # name values in the following response elements: + # Delimiter, Prefix, ContinuationToken, Key, and StartAfter. + _decode_list_object( + top_level_keys=['Delimiter', 'Prefix', 'StartAfter'], + nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], + parsed=parsed, + context=context, + ) + + +def decode_list_object_versions(parsed, context, **kwargs): + # From the documentation: If you specify encoding-type request parameter, + # Amazon S3 includes this element in the response, and returns encoded key + # name values in the following response elements: + # KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + _decode_list_object( + top_level_keys=[ + 'KeyMarker', + 'NextKeyMarker', + 'Prefix', + 'Delimiter', + ], + nested_keys=[ + ('Versions', 'Key'), + ('DeleteMarkers', 'Key'), + ('CommonPrefixes', 'Prefix'), + ], + parsed=parsed, + context=context, + ) + + +def _decode_list_object(top_level_keys, nested_keys, parsed, context): + if parsed.get('EncodingType') == 'url' and context.get( + 'encoding_type_auto_set' + ): + # URL decode top-level keys in the response if present. + for key in top_level_keys: + if key in parsed: + parsed[key] = unquote_str(parsed[key]) + # URL decode nested keys from the response if present. + for top_key, child_key in nested_keys: + if top_key in parsed: + for member in parsed[top_key]: + member[child_key] = unquote_str(member[child_key]) + + +def convert_body_to_file_like_object(params, **kwargs): + if 'Body' in params: + if isinstance(params['Body'], str): + params['Body'] = BytesIO(ensure_bytes(params['Body'])) + elif isinstance(params['Body'], bytes): + params['Body'] = BytesIO(params['Body']) + + +def _add_parameter_aliases(handler_list): + # Mapping of original parameter to parameter alias. + # The key is ..parameter + # The first part of the key is used for event registration. + # The last part is the original parameter name and the value is the + # alias to expose in documentation. + aliases = { + 'ec2.*.Filter': 'Filters', + 'logs.CreateExportTask.from': 'fromTime', + 'cloudsearchdomain.Search.return': 'returnFields', + } + + for original, new_name in aliases.items(): + event_portion, original_name = original.rsplit('.', 1) + parameter_alias = ParameterAlias(original_name, new_name) + + # Add the handlers to the list of handlers. + # One handler is to handle when users provide the alias. + # The other handler is to update the documentation to show only + # the alias. + parameter_build_event_handler_tuple = ( + 'before-parameter-build.' + event_portion, + parameter_alias.alias_parameter_in_call, + REGISTER_FIRST, + ) + docs_event_handler_tuple = ( + 'docs.*.' 
+ event_portion + '.complete-section',
+            parameter_alias.alias_parameter_in_documentation,
+        )
+        handler_list.append(parameter_build_event_handler_tuple)
+        handler_list.append(docs_event_handler_tuple)
+
+
+class ParameterAlias:
+    def __init__(self, original_name, alias_name):
+        self._original_name = original_name
+        self._alias_name = alias_name
+
+    def alias_parameter_in_call(self, params, model, **kwargs):
+        if model.input_shape:
+            # Only consider accepting the alias if it is modeled in the
+            # input shape.
+            if self._original_name in model.input_shape.members:
+                if self._alias_name in params:
+                    if self._original_name in params:
+                        raise AliasConflictParameterError(
+                            original=self._original_name,
+                            alias=self._alias_name,
+                            operation=model.name,
+                        )
+                    # Remove the alias parameter value and use the old name
+                    # instead.
+                    params[self._original_name] = params.pop(self._alias_name)
+
+    def alias_parameter_in_documentation(self, event_name, section, **kwargs):
+        if event_name.startswith('docs.request-params'):
+            if self._original_name not in section.available_sections:
+                return
+            # Replace the name for parameter type
+            param_section = section.get_section(self._original_name)
+            param_type_section = param_section.get_section('param-type')
+            self._replace_content(param_type_section)
+
+            # Replace the name for the parameter description
+            param_name_section = param_section.get_section('param-name')
+            self._replace_content(param_name_section)
+        elif event_name.startswith('docs.request-example'):
+            section = section.get_section('structure-value')
+            if self._original_name not in section.available_sections:
+                return
+            # Replace the name for the example
+            param_section = section.get_section(self._original_name)
+            self._replace_content(param_section)
+
+    def _replace_content(self, section):
+        content = section.getvalue().decode('utf-8')
+        updated_content = content.replace(
+            self._original_name, self._alias_name
+        )
+        section.clear_text()
+        section.write(updated_content)
+
+
+class ClientMethodAlias:
+    def __init__(self, actual_name):
+        """Aliases a non-extant method to an existing method.
+
+        :param actual_name: The name of the method that actually exists on
+            the client.
+        """
+        self._actual = actual_name
+
+    def __call__(self, client, **kwargs):
+        return getattr(client, self._actual)
+
+
+# TODO: Remove this class as it is no longer used
+class HeaderToHostHoister:
+    """Takes a header and moves it to the front of the hoststring."""
+
+    _VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$')
+
+    def __init__(self, header_name):
+        self._header_name = header_name
+
+    def hoist(self, params, **kwargs):
+        """Hoist a header to the hostname.
+
+        Hoist a header to the beginning of the hostname with a suffix "." after
+        it. The original header should be removed from the header map. This
+        method is intended to be used as a target for the before-call event.
+        """
+        if self._header_name not in params['headers']:
+            return
+        header_value = params['headers'][self._header_name]
+        self._ensure_header_is_valid_host(header_value)
+        original_url = params['url']
+        new_url = self._prepend_to_host(original_url, header_value)
+        params['url'] = new_url
+
+    def _ensure_header_is_valid_host(self, header):
+        match = self._VALID_HOSTNAME.match(header)
+        if not match:
+            raise ParamValidationError(
+                report=(
+                    'Hostnames must contain only - and alphanumeric '
+                    'characters, and between 1 and 63 characters long.'
+                )
+            )
+
+    def _prepend_to_host(self, url, prefix):
+        url_components = urlsplit(url)
+        parts = url_components.netloc.split('.')
+        parts = [prefix] + parts
+        new_netloc = '.'.join(parts)
+        new_components = (
+            url_components.scheme,
+            new_netloc,
+            url_components.path,
+            url_components.query,
+            '',
+        )
+        new_url = urlunsplit(new_components)
+        return new_url
+
+
+def remove_lex_v2_start_conversation(class_attributes, **kwargs):
+    """Operation requires h2 which is currently unsupported in Python"""
+    if 'start_conversation' in class_attributes:
+        del class_attributes['start_conversation']
+
+
+def remove_qbusiness_chat(class_attributes, **kwargs):
+    """Operation requires h2 which is currently unsupported in Python"""
+    if 'chat' in class_attributes:
+        del class_attributes['chat']
+
+
+def remove_bedrock_runtime_invoke_model_with_bidirectional_stream(
+    class_attributes, **kwargs
+):
+    """Operation requires h2 which is currently unsupported in Python"""
+    if 'invoke_model_with_bidirectional_stream' in class_attributes:
+        del class_attributes['invoke_model_with_bidirectional_stream']
+
+
+def add_retry_headers(request, **kwargs):
+    retries_context = request.context.get('retries', {})
+    headers = request.headers
+    headers['amz-sdk-invocation-id'] = retries_context['invocation-id']
+
+    sdk_retry_keys = ('ttl', 'attempt', 'max')
+    sdk_request_headers = [
+        f'{key}={retries_context[key]}'
+        for key in sdk_retry_keys
+        if key in retries_context
+    ]
+    headers['amz-sdk-request'] = '; '.join(sdk_request_headers)
+
+
+def remove_arn_from_signing_path(request, **kwargs):
+    auth_path_parts = request.auth_path.split('/')
+    if len(auth_path_parts) > 1 and ArnParser.is_arn(
+        unquote(auth_path_parts[1])
+    ):
+        request.auth_path = '/'.join(['', *auth_path_parts[2:]])
+
+
+def customize_endpoint_resolver_builtins(
+    builtins, model, params, context, **kwargs
+):
+    """Modify builtin parameter values for endpoint resolver
+
+    Modifies the builtins dict in place. Changes are in effect for one call.
+    The corresponding event is emitted only if at least one builtin parameter
+    value is required for endpoint resolution for the operation.
+    """
+    bucket_name = params.get('Bucket')
+    bucket_is_arn = bucket_name is not None and ArnParser.is_arn(bucket_name)
+    # In some situations the host will return AuthorizationHeaderMalformed
+    # when the signing region of a sigv4 request is not the bucket's
+    # region (which is likely unknown by the user of GetBucketLocation).
+    # Avoid this by always using path-style addressing.
+ if model.name == 'GetBucketLocation': + builtins[EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE] = True + # All situations where the bucket name is an ARN are not compatible + # with path style addressing. + elif bucket_is_arn: + builtins[EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE] = False + + # Bucket names that are invalid host labels require path-style addressing. + # If path-style addressing was specifically requested, the default builtin + # value is already set. + path_style_required = ( + bucket_name is not None and not VALID_HOST_LABEL_RE.match(bucket_name) + ) + path_style_requested = builtins[ + EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE + ] + + # Path-style addressing is incompatible with the global endpoint for + # presigned URLs. If the bucket name is an ARN, the ARN's region should be + # used in the endpoint. + if ( + context.get('use_global_endpoint') + and not path_style_required + and not path_style_requested + and not bucket_is_arn + and not utils.is_s3express_bucket(bucket_name) + ): + builtins[EndpointResolverBuiltins.AWS_REGION] = 'aws-global' + builtins[EndpointResolverBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT] = True + + +def remove_content_type_header_for_presigning(request, **kwargs): + if ( + request.context.get('is_presign_request') is True + and 'Content-Type' in request.headers + ): + del request.headers['Content-Type'] + + +def handle_expires_header( + operation_model, response_dict, customized_response_dict, **kwargs +): + if _has_expires_shape(operation_model.output_shape): + if expires_value := response_dict.get('headers', {}).get('Expires'): + customized_response_dict['ExpiresString'] = expires_value + try: + utils.parse_timestamp(expires_value) + except (ValueError, RuntimeError): + logger.warning( + 'Failed to parse the "Expires" member as a timestamp: %s. ' + 'The unparsed value is available in the response under "ExpiresString".', + expires_value, + ) + del response_dict['headers']['Expires'] + + +def _has_expires_shape(shape): + if not shape: + return False + return any( + member_shape.name == 'Expires' + and member_shape.serialization.get('name') == 'Expires' + for member_shape in shape.members.values() + ) + + +def document_expires_shape(section, event_name, **kwargs): + # Updates the documentation for S3 operations that include the 'Expires' member + # in their response structure. Documents a synthetic member 'ExpiresString' and + # includes a deprecation notice for 'Expires'. + if 'response-example' in event_name: + if not section.has_section('structure-value'): + return + parent = section.get_section('structure-value') + if not parent.has_section('Expires'): + return + param_line = parent.get_section('Expires') + param_line.add_new_section('ExpiresString') + new_param_line = param_line.get_section('ExpiresString') + new_param_line.write("'ExpiresString': 'string',") + new_param_line.style.new_line() + elif 'response-params' in event_name: + if not section.has_section('Expires'): + return + param_section = section.get_section('Expires') + # Add a deprecation notice for the "Expires" param + doc_section = param_section.get_section('param-documentation') + doc_section.style.start_note() + doc_section.write( + 'This member has been deprecated. Please use ``ExpiresString`` instead.' 
+ ) + doc_section.style.end_note() + # Document the "ExpiresString" param + new_param_section = param_section.add_new_section('ExpiresString') + new_param_section.style.new_paragraph() + new_param_section.write('- **ExpiresString** *(string) --*') + new_param_section.style.indent() + new_param_section.style.new_paragraph() + new_param_section.write( + 'The raw, unparsed value of the ``Expires`` field.' + ) + + +def _handle_200_error(operation_model, response_dict, **kwargs): + # S3 can return a 200 response with an error embedded in the body. + # Convert the 200 to a 500 for retry resolution in ``_update_status_code``. + if not _should_handle_200_error(operation_model, response_dict): + # Operations with streaming response blobs are excluded as they + # can't be reliably distinguished from an S3 error. + return + if _looks_like_special_case_error( + response_dict['status_code'], response_dict['body'] + ): + response_dict['status_code'] = 500 + logger.debug( + "Error found for response with 200 status code: %s.", + response_dict['body'], + ) + + +def _should_handle_200_error(operation_model, response_dict): + output_shape = operation_model.output_shape + if ( + not response_dict + or operation_model.has_event_stream_output + or not output_shape + ): + return False + payload = output_shape.serialization.get('payload') + if payload is not None: + payload_shape = output_shape.members[payload] + if payload_shape.type_name in ('blob', 'string'): + return False + return True + + +def _update_status_code(response, **kwargs): + # Update the http_response status code when the parsed response has been + # modified in a handler. This enables retries for cases like ``_handle_200_error``. + if response is None: + return + http_response, parsed = response + parsed_status_code = parsed.get('ResponseMetadata', {}).get( + 'HTTPStatusCode', http_response.status_code + ) + if http_response.status_code != parsed_status_code: + http_response.status_code = parsed_status_code + + +def _handle_request_validation_mode_member(params, model, **kwargs): + client_config = kwargs.get("context", {}).get("client_config") + if client_config is None: + return + response_checksum_validation = client_config.response_checksum_validation + http_checksum = model.http_checksum + mode_member = http_checksum.get("requestValidationModeMember") + if ( + mode_member is not None + and response_checksum_validation == "when_supported" + ): + params.setdefault(mode_member, "ENABLED") + + +def _set_extra_headers_for_unsigned_request( + request, signature_version, **kwargs +): + # When sending a checksum in the trailer of an unsigned chunked request, S3 + # requires us to set the "X-Amz-Content-SHA256" header to "STREAMING-UNSIGNED-PAYLOAD-TRAILER". + checksum_context = request.context.get("checksum", {}) + algorithm = checksum_context.get("request_algorithm", {}) + in_trailer = algorithm.get("in") == "trailer" + headers = request.headers + if signature_version == botocore.UNSIGNED and in_trailer: + headers["X-Amz-Content-SHA256"] = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + + +def _set_auth_scheme_preference_signer(context, signing_name, **kwargs): + """ + Determines the appropriate signer to use based on the client configuration, + authentication scheme preferences, and the availability of a bearer token. 
+ """ + client_config = context.get('client_config') + if client_config is None: + return + + signature_version = client_config.signature_version + auth_scheme_preference = client_config.auth_scheme_preference + auth_options = context.get('auth_options') + + signature_version_set_in_code = ( + isinstance(signature_version, ClientConfigString) + or signature_version is botocore.UNSIGNED + ) + auth_preference_set_in_code = isinstance( + auth_scheme_preference, ClientConfigString + ) + has_in_code_configuration = ( + signature_version_set_in_code or auth_preference_set_in_code + ) + + resolved_signature_version = signature_version + + # If signature version was not set in code, but an auth scheme preference + # is available, resolve it based on the preferred schemes and supported auth + # options for this service. + if ( + not signature_version_set_in_code + and auth_scheme_preference + and auth_options + ): + preferred_schemes = auth_scheme_preference.split(',') + resolved = botocore.auth.resolve_auth_scheme_preference( + preferred_schemes, auth_options + ) + resolved_signature_version = ( + botocore.UNSIGNED if resolved == 'none' else resolved + ) + + # Prefer 'bearer' signature version if a bearer token is available, and it + # is allowed for this service. This can override earlier resolution if the + # config object didn't explicitly set a signature version. + if _should_prefer_bearer_auth( + has_in_code_configuration, + signing_name, + resolved_signature_version, + auth_options, + ): + register_feature_id('BEARER_SERVICE_ENV_VARS') + resolved_signature_version = 'bearer' + + if resolved_signature_version == signature_version: + return None + return resolved_signature_version + + +def _should_prefer_bearer_auth( + has_in_code_configuration, + signing_name, + resolved_signature_version, + auth_options, +): + if signing_name not in get_bearer_auth_supported_services(): + return False + + if not auth_options or 'smithy.api#httpBearerAuth' not in auth_options: + return False + + has_token = get_token_from_environment(signing_name) is not None + + # Prefer 'bearer' if a bearer token is available, and either: + # Bearer was already resolved, or + # No auth-related values were explicitly set in code + return has_token and ( + resolved_signature_version == 'bearer' or not has_in_code_configuration + ) + + +def get_bearer_auth_supported_services(): + """ + Returns a set of services that support bearer token authentication. + These values correspond to the service's `signingName` property as defined + in model.py, falling back to `endpointPrefix` if `signingName` is not set. + + Warning: This is a private interface and is subject to abrupt breaking changes, + including removal, in any botocore release. It is not intended for external use, + and its usage outside of botocore is not advised or supported. + """ + return {'bedrock'} + + +# This is a list of (event_name, handler). +# When a Session is created, everything in this list will be +# automatically registered with that Session. 
+ +BUILTIN_HANDLERS = [ + ('choose-service-name', handle_service_name_alias), + ( + 'getattr.mturk.list_hi_ts_for_qualification_type', + ClientMethodAlias('list_hits_for_qualification_type'), + ), + ( + 'getattr.socialmessaging.delete_whatsapp_media_message', + ClientMethodAlias('delete_whatsapp_message_media'), + ), + ( + 'before-parameter-build.s3.UploadPart', + convert_body_to_file_like_object, + REGISTER_LAST, + ), + ( + 'before-parameter-build.s3.PutObject', + convert_body_to_file_like_object, + REGISTER_LAST, + ), + ('creating-client-class', add_generate_presigned_url), + ('creating-client-class.s3', add_generate_presigned_post), + ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2), + ('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation), + ('creating-client-class.qbusiness', remove_qbusiness_chat), + ( + 'creating-client-class.bedrock-runtime', + remove_bedrock_runtime_invoke_model_with_bidirectional_stream, + ), + ( + 'creating-serializer.bedrock-agentcore', + enable_millisecond_timestamp_precision, + ), + ('after-call.iam', json_decode_policies), + ('after-call.ec2.GetConsoleOutput', decode_console_output), + ('after-call.cloudformation.GetTemplate', json_decode_template_body), + ('after-call.s3.GetBucketLocation', parse_get_bucket_location), + ( + 'after-call.sqs.*', + _handle_sqs_compatible_error, + ), + ('before-parse.s3.*', handle_expires_header), + ('before-parse.s3.*', _handle_200_error, REGISTER_FIRST), + ('before-parameter-build', generate_idempotent_uuid), + ('before-parameter-build', _handle_request_validation_mode_member), + ('before-parameter-build.s3', validate_bucket_name), + ('before-parameter-build.s3', remove_bucket_from_url_paths_from_model), + ( + 'before-parameter-build.s3.ListObjects', + set_list_objects_encoding_type_url, + ), + ( + 'before-parameter-build.s3.ListObjectsV2', + set_list_objects_encoding_type_url, + ), + ( + 'before-parameter-build.s3.ListObjectVersions', + set_list_objects_encoding_type_url, + ), + ('before-parameter-build.s3.CopyObject', handle_copy_source_param), + ('before-parameter-build.s3.UploadPartCopy', handle_copy_source_param), + ('before-parameter-build.s3.CopyObject', validate_ascii_metadata), + ('before-parameter-build.s3.PutObject', validate_ascii_metadata), + ( + 'before-parameter-build.s3.CreateMultipartUpload', + validate_ascii_metadata, + ), + ('before-parameter-build.s3-control', remove_accid_host_prefix_from_model), + ('docs.*.s3.CopyObject.complete-section', document_copy_source_form), + ('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form), + ('docs.response-example.s3.*.complete-section', document_expires_shape), + ('docs.response-params.s3.*.complete-section', document_expires_shape), + ('before-endpoint-resolution.s3', customize_endpoint_resolver_builtins), + ('before-call', add_recursion_detection_header), + ('before-call.s3', add_expect_header), + ('before-call.glacier', add_glacier_version), + ('before-call.apigateway', add_accept_header), + ('before-call.s3.DeleteObjects', escape_xml_payload), + ('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload), + ('before-call.glacier.UploadArchive', add_glacier_checksums), + ('before-call.glacier.UploadMultipartPart', add_glacier_checksums), + ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2), + ('request-created', add_retry_headers), + ('request-created.machinelearning.Predict', switch_host_machinelearning), + ('needs-retry.s3.*', _update_status_code, REGISTER_FIRST), + 
('choose-signer.cognito-identity.GetId', disable_signing),
+    ('choose-signer.cognito-identity.GetOpenIdToken', disable_signing),
+    ('choose-signer.cognito-identity.UnlinkIdentity', disable_signing),
+    (
+        'choose-signer.cognito-identity.GetCredentialsForIdentity',
+        disable_signing,
+    ),
+    ('choose-signer.sts.AssumeRoleWithSAML', disable_signing),
+    ('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing),
+    ('choose-signer', set_operation_specific_signer),
+    ('choose-signer', _set_auth_scheme_preference_signer),
+    ('before-parameter-build.s3.HeadObject', sse_md5),
+    ('before-parameter-build.s3.GetObject', sse_md5),
+    ('before-parameter-build.s3.PutObject', sse_md5),
+    ('before-parameter-build.s3.CopyObject', sse_md5),
+    ('before-parameter-build.s3.CopyObject', copy_source_sse_md5),
+    ('before-parameter-build.s3.CreateMultipartUpload', sse_md5),
+    ('before-parameter-build.s3.UploadPart', sse_md5),
+    ('before-parameter-build.s3.UploadPartCopy', sse_md5),
+    ('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5),
+    ('before-parameter-build.s3.CompleteMultipartUpload', sse_md5),
+    ('before-parameter-build.s3.SelectObjectContent', sse_md5),
+    ('before-parameter-build.ec2.RunInstances', base64_encode_user_data),
+    (
+        'before-parameter-build.autoscaling.CreateLaunchConfiguration',
+        base64_encode_user_data,
+    ),
+    ('before-parameter-build.route53', fix_route53_ids),
+    ('before-parameter-build.glacier', inject_account_id),
+    ('before-sign.s3', remove_arn_from_signing_path),
+    ('before-sign.s3', _set_extra_headers_for_unsigned_request),
+    (
+        'before-sign.polly.SynthesizeSpeech',
+        remove_content_type_header_for_presigning,
+    ),
+    ('after-call.s3.ListObjects', decode_list_object),
+    ('after-call.s3.ListObjectsV2', decode_list_object_v2),
+    ('after-call.s3.ListObjectVersions', decode_list_object_versions),
+    # Cloudsearchdomain search operation will be sent by HTTP POST
+    ('request-created.cloudsearchdomain.Search', change_get_to_post),
+    # Glacier documentation customizations
+    (
+        'docs.*.glacier.*.complete-section',
+        AutoPopulatedParam(
+            'accountId',
+            'Note: this parameter is set to "-" by '
+            'default if no value is specified.',
+        ).document_auto_populated_param,
+    ),
+    (
+        'docs.*.glacier.UploadArchive.complete-section',
+        AutoPopulatedParam('checksum').document_auto_populated_param,
+    ),
+    (
+        'docs.*.glacier.UploadMultipartPart.complete-section',
+        AutoPopulatedParam('checksum').document_auto_populated_param,
+    ),
+    (
+        'docs.request-params.glacier.CompleteMultipartUpload.complete-section',
+        document_glacier_tree_hash_checksum(),
+    ),
+    # Cloudformation documentation customizations
+    (
+        'docs.*.cloudformation.GetTemplate.complete-section',
+        document_cloudformation_get_template_return_type,
+    ),
+    # UserData base64 encoding documentation customizations
+    (
+        'docs.*.ec2.RunInstances.complete-section',
+        document_base64_encoding('UserData'),
+    ),
+    (
+        'docs.*.autoscaling.CreateLaunchConfiguration.complete-section',
+        document_base64_encoding('UserData'),
+    ),
+    # EC2 CopySnapshot documentation customizations
+    (
+        'docs.*.ec2.CopySnapshot.complete-section',
+        AutoPopulatedParam('PresignedUrl').document_auto_populated_param,
+    ),
+    (
+        'docs.*.ec2.CopySnapshot.complete-section',
+        AutoPopulatedParam('DestinationRegion').document_auto_populated_param,
+    ),
+    # S3 SSE documentation modifications
+    (
+        'docs.*.s3.*.complete-section',
+        AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param,
+    ),
+    # S3 SSE Copy Source documentation modifications
+    (
+
'docs.*.s3.*.complete-section', + AutoPopulatedParam( + 'CopySourceSSECustomerKeyMD5' + ).document_auto_populated_param, + ), + # Add base64 information to Lambda + ( + 'docs.*.lambda.UpdateFunctionCode.complete-section', + document_base64_encoding('ZipFile'), + ), + # The following S3 operations cannot actually accept a ContentMD5 + ( + 'docs.*.s3.*.complete-section', + HideParamFromOperations( + 's3', + 'ContentMD5', + [ + 'DeleteObjects', + 'PutBucketAcl', + 'PutBucketCors', + 'PutBucketLifecycle', + 'PutBucketLogging', + 'PutBucketNotification', + 'PutBucketPolicy', + 'PutBucketReplication', + 'PutBucketRequestPayment', + 'PutBucketTagging', + 'PutBucketVersioning', + 'PutBucketWebsite', + 'PutObjectAcl', + ], + ).hide_param, + ), + ############# + # DSQL + ############# + ('creating-client-class.dsql', add_dsql_generate_db_auth_token_methods), + ############# + # RDS + ############# + ('creating-client-class.rds', add_generate_db_auth_token), + ('before-call.rds.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.rds.CreateDBCluster', inject_presigned_url_rds), + ('before-call.rds.CopyDBSnapshot', inject_presigned_url_rds), + ('before-call.rds.CreateDBInstanceReadReplica', inject_presigned_url_rds), + ( + 'before-call.rds.StartDBInstanceAutomatedBackupsReplication', + inject_presigned_url_rds, + ), + # RDS PresignedUrl documentation customizations + ( + 'docs.*.rds.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CopyDBSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CreateDBInstanceReadReplica.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ############# + # Neptune + ############# + ('before-call.neptune.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.neptune.CreateDBCluster', inject_presigned_url_rds), + # Neptune PresignedUrl documentation customizations + ( + 'docs.*.neptune.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.neptune.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ############# + # DocDB + ############# + ('before-call.docdb.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.docdb.CreateDBCluster', inject_presigned_url_rds), + # DocDB PresignedUrl documentation customizations + ( + 'docs.*.docdb.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.docdb.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ('before-call', inject_api_version_header_if_needed), +] +_add_parameter_aliases(BUILTIN_HANDLERS) diff --git a/py311/lib/python3.11/site-packages/botocore/history.py b/py311/lib/python3.11/site-packages/botocore/history.py new file mode 100644 index 0000000000000000000000000000000000000000..59d9481d7fb5ed48c737c6c835bd916a78aa3ff8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/history.py @@ -0,0 +1,55 @@ +# Copyright 2017 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import logging + +HISTORY_RECORDER = None +logger = logging.getLogger(__name__) + + +class BaseHistoryHandler: + def emit(self, event_type, payload, source): + raise NotImplementedError('emit()') + + +class HistoryRecorder: + def __init__(self): + self._enabled = False + self._handlers = [] + + def enable(self): + self._enabled = True + + def disable(self): + self._enabled = False + + def add_handler(self, handler): + self._handlers.append(handler) + + def record(self, event_type, payload, source='BOTOCORE'): + if self._enabled and self._handlers: + for handler in self._handlers: + try: + handler.emit(event_type, payload, source) + except Exception: + # Never let the process die because we had a failure in + # a record collection handler. + logger.debug( + "Exception raised in %s.", handler, exc_info=True + ) + + +def get_global_history_recorder(): + global HISTORY_RECORDER + if HISTORY_RECORDER is None: + HISTORY_RECORDER = HistoryRecorder() + return HISTORY_RECORDER diff --git a/py311/lib/python3.11/site-packages/botocore/hooks.py b/py311/lib/python3.11/site-packages/botocore/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..c672b1c0506fd3afd4a1e6a75e7472d689014afa --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/hooks.py @@ -0,0 +1,660 @@ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import copy +import logging +from collections import deque, namedtuple + +from botocore.compat import accepts_kwargs +from botocore.utils import EVENT_ALIASES + +logger = logging.getLogger(__name__) + + +_NodeList = namedtuple('NodeList', ['first', 'middle', 'last']) +_FIRST = 0 +_MIDDLE = 1 +_LAST = 2 + + +class NodeList(_NodeList): + def __copy__(self): + first_copy = copy.copy(self.first) + middle_copy = copy.copy(self.middle) + last_copy = copy.copy(self.last) + copied = NodeList(first_copy, middle_copy, last_copy) + return copied + + +def first_non_none_response(responses, default=None): + """Find first non None response in a list of tuples. + + This function can be used to find the first non None response from + handlers connected to an event. This is useful if you are interested + in the returned responses from event handlers. Example usage:: + + print(first_non_none_response([(func1, None), (func2, 'foo'), + (func3, 'bar')])) + # This will print 'foo' + + :type responses: list of tuples + :param responses: The responses from the ``EventHooks.emit`` method. 
+        This is a list of tuples, and each tuple is
+        (handler, handler_response).
+
+    :param default: If no non-None responses are found, then this default
+                    value will be returned.
+
+    :return: The first non-None response in the list of tuples.
+
+    """
+    for response in responses:
+        if response[1] is not None:
+            return response[1]
+    return default
+
+
+class BaseEventHooks:
+    def emit(self, event_name, **kwargs):
+        """Call all handlers subscribed to an event.
+
+        :type event_name: str
+        :param event_name: The name of the event to emit.
+
+        :type **kwargs: dict
+        :param **kwargs: Arbitrary kwargs to pass through to the
+            subscribed handlers.  The ``event_name`` will be injected
+            into the kwargs so it's not necessary to add this to **kwargs.
+
+        :rtype: list of tuples
+        :return: A list of ``(handler_func, handler_func_return_value)``
+
+        """
+        return []
+
+    def register(
+        self, event_name, handler, unique_id=None, unique_id_uses_count=False
+    ):
+        """Register an event handler for a given event.
+
+        If a ``unique_id`` is given, the handler will not be registered
+        if a handler with the ``unique_id`` has already been registered.
+
+        Handlers are called in the order they have been registered.
+        Note that handlers can also be registered with ``register_first()``
+        and ``register_last()``.  All handlers registered with
+        ``register_first()`` are called before handlers registered
+        with ``register()``, which are called before handlers registered
+        with ``register_last()``.
+
+        """
+        self._verify_and_register(
+            event_name,
+            handler,
+            unique_id,
+            register_method=self._register,
+            unique_id_uses_count=unique_id_uses_count,
+        )
+
+    def register_first(
+        self, event_name, handler, unique_id=None, unique_id_uses_count=False
+    ):
+        """Register an event handler to be called first for an event.
+
+        All event handlers registered with ``register_first()`` will
+        be called before handlers registered with ``register()`` and
+        ``register_last()``.
+
+        """
+        self._verify_and_register(
+            event_name,
+            handler,
+            unique_id,
+            register_method=self._register_first,
+            unique_id_uses_count=unique_id_uses_count,
+        )
+
+    def register_last(
+        self, event_name, handler, unique_id=None, unique_id_uses_count=False
+    ):
+        """Register an event handler to be called last for an event.
+
+        All event handlers registered with ``register_last()`` will be called
+        after handlers registered with ``register_first()`` and ``register()``.
+
+        """
+        self._verify_and_register(
+            event_name,
+            handler,
+            unique_id,
+            register_method=self._register_last,
+            unique_id_uses_count=unique_id_uses_count,
+        )
+
+    def _verify_and_register(
+        self,
+        event_name,
+        handler,
+        unique_id,
+        register_method,
+        unique_id_uses_count,
+    ):
+        self._verify_is_callable(handler)
+        self._verify_accept_kwargs(handler)
+        register_method(event_name, handler, unique_id, unique_id_uses_count)
+
+    def unregister(
+        self,
+        event_name,
+        handler=None,
+        unique_id=None,
+        unique_id_uses_count=False,
+    ):
+        """Unregister an event handler for a given event.
+
+        If no ``unique_id`` was given during registration, then the
+        first instance of the event handler is removed (if the event
+        handler has been registered multiple times).
+
+        """
+        pass
+
+    def _verify_is_callable(self, func):
+        if not callable(func):
+            raise ValueError(f"Event handler {func} must be callable.")
+
+    def _verify_accept_kwargs(self, func):
+        """Verifies a callable accepts kwargs.
+
+        :type func: callable
+        :param func: A callable object.
+
+        :raises ValueError: If ``func`` does not accept keyword
+            arguments (``**kwargs``).
+
+        """
+        try:
+            if not accepts_kwargs(func):
+                raise ValueError(
+                    f"Event handler {func} must accept keyword "
+                    f"arguments (**kwargs)"
+                )
+        except TypeError:
+            return False
+
+
+class HierarchicalEmitter(BaseEventHooks):
+    def __init__(self):
+        # We keep a reference to the handlers for quick
+        # read only access (we never modify self._handlers).
+        # A cache of event name to handler list.
+        self._lookup_cache = {}
+        self._handlers = _PrefixTrie()
+        # This is used to ensure that unique_id's are only
+        # registered once.
+        self._unique_id_handlers = {}
+
+    def _emit(self, event_name, kwargs, stop_on_response=False):
+        """
+        Emit an event with optional keyword arguments.
+
+        :type event_name: string
+        :param event_name: Name of the event
+        :type kwargs: dict
+        :param kwargs: Arguments to be passed to the handler functions.
+        :type stop_on_response: boolean
+        :param stop_on_response: Whether to stop on the first non-None
+                                 response. If False, then all handlers
+                                 will be called. Stopping is especially
+                                 useful for handlers which mutate data and
+                                 then want to stop propagation of the event.
+        :rtype: list
+        :return: List of (handler, response) tuples from all processed
+                 handlers.
+        """
+        # Invoke the event handlers from most specific
+        # to least specific, each time stripping off a dot.
+        handlers_to_call = self._lookup_cache.get(event_name)
+        if handlers_to_call is None:
+            handlers_to_call = self._handlers.prefix_search(event_name)
+            self._lookup_cache[event_name] = handlers_to_call
+        elif not handlers_to_call:
+            # Short circuit and return an empty response if we have
+            # no handlers to call. This is the common case where
+            # for the majority of signals, nothing is listening.
+            return []
+        kwargs['event_name'] = event_name
+        responses = []
+        for handler in handlers_to_call:
+            logger.debug('Event %s: calling handler %s', event_name, handler)
+            response = handler(**kwargs)
+            responses.append((handler, response))
+            if stop_on_response and response is not None:
+                return responses
+        return responses
+
+    def emit(self, event_name, **kwargs):
+        """
+        Emit an event by name with arguments passed as keyword args.
+
+        >>> responses = emitter.emit(
+        ...     'my-event.service.operation', arg1='one', arg2='two')
+
+        :rtype: list
+        :return: List of (handler, response) tuples from all processed
+                 handlers.
+        """
+        return self._emit(event_name, kwargs)
+
+    def emit_until_response(self, event_name, **kwargs):
+        """
+        Emit an event by name with arguments passed as keyword args,
+        until the first non-``None`` response is received. This
+        method prevents subsequent handlers from being invoked.
+
+        >>> handler, response = emitter.emit_until_response(
+        ...     'my-event.service.operation', arg1='one', arg2='two')
+
+        :rtype: tuple
+        :return: The first (handler, response) tuple where the response
+                 is not ``None``, otherwise (``None``, ``None``).
+ """ + responses = self._emit(event_name, kwargs, stop_on_response=True) + if responses: + return responses[-1] + else: + return (None, None) + + def _register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + self._register_section( + event_name, + handler, + unique_id, + unique_id_uses_count, + section=_MIDDLE, + ) + + def _register_first( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + self._register_section( + event_name, + handler, + unique_id, + unique_id_uses_count, + section=_FIRST, + ) + + def _register_last( + self, event_name, handler, unique_id, unique_id_uses_count=False + ): + self._register_section( + event_name, handler, unique_id, unique_id_uses_count, section=_LAST + ) + + def _register_section( + self, event_name, handler, unique_id, unique_id_uses_count, section + ): + if unique_id is not None: + if unique_id in self._unique_id_handlers: + # We've already registered a handler using this unique_id + # so we don't need to register it again. + count = self._unique_id_handlers[unique_id].get('count', None) + if unique_id_uses_count: + if not count: + raise ValueError( + f"Initial registration of unique id {unique_id} was " + "specified to use a counter. Subsequent register " + "calls to unique id must specify use of a counter " + "as well." + ) + else: + self._unique_id_handlers[unique_id]['count'] += 1 + else: + if count: + raise ValueError( + f"Initial registration of unique id {unique_id} was " + "specified to not use a counter. Subsequent " + "register calls to unique id must specify not to " + "use a counter as well." + ) + return + else: + # Note that the trie knows nothing about the unique + # id. We track uniqueness in this class via the + # _unique_id_handlers. + self._handlers.append_item( + event_name, handler, section=section + ) + unique_id_handler_item = {'handler': handler} + if unique_id_uses_count: + unique_id_handler_item['count'] = 1 + self._unique_id_handlers[unique_id] = unique_id_handler_item + else: + self._handlers.append_item(event_name, handler, section=section) + # Super simple caching strategy for now, if we change the registrations + # clear the cache. This has the opportunity for smarter invalidations. + self._lookup_cache = {} + + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): + if unique_id is not None: + try: + count = self._unique_id_handlers[unique_id].get('count', None) + except KeyError: + # There's no handler matching that unique_id so we have + # nothing to unregister. + return + if unique_id_uses_count: + if count is None: + raise ValueError( + f"Initial registration of unique id {unique_id} was specified to " + "use a counter. Subsequent unregister calls to unique " + "id must specify use of a counter as well." + ) + elif count == 1: + handler = self._unique_id_handlers.pop(unique_id)[ + 'handler' + ] + else: + self._unique_id_handlers[unique_id]['count'] -= 1 + return + else: + if count: + raise ValueError( + f"Initial registration of unique id {unique_id} was specified " + "to not use a counter. Subsequent unregister calls " + "to unique id must specify not to use a counter as " + "well." 
+ ) + handler = self._unique_id_handlers.pop(unique_id)['handler'] + try: + self._handlers.remove_item(event_name, handler) + self._lookup_cache = {} + except ValueError: + pass + + def __copy__(self): + new_instance = self.__class__() + new_state = self.__dict__.copy() + new_state['_handlers'] = copy.copy(self._handlers) + new_state['_unique_id_handlers'] = copy.copy(self._unique_id_handlers) + new_instance.__dict__ = new_state + return new_instance + + +class EventAliaser(BaseEventHooks): + def __init__(self, event_emitter, event_aliases=None): + self._event_aliases = event_aliases + if event_aliases is None: + self._event_aliases = EVENT_ALIASES + self._alias_name_cache = {} + self._emitter = event_emitter + + def emit(self, event_name, **kwargs): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.emit(aliased_event_name, **kwargs) + + def emit_until_response(self, event_name, **kwargs): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.emit_until_response(aliased_event_name, **kwargs) + + def register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.register( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def register_first( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.register_first( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def register_last( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.register_last( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): + aliased_event_name = self._alias_event_name(event_name) + return self._emitter.unregister( + aliased_event_name, handler, unique_id, unique_id_uses_count + ) + + def _alias_event_name(self, event_name): + if event_name in self._alias_name_cache: + return self._alias_name_cache[event_name] + + for old_part, new_part in self._event_aliases.items(): + # We can't simply do a string replace for everything, otherwise we + # might end up translating substrings that we never intended to + # translate. When there aren't any dots in the old event name + # part, then we can quickly replace the item in the list if it's + # there. + event_parts = event_name.split('.') + if '.' not in old_part: + try: + # Theoretically a given event name could have the same part + # repeated, but in practice this doesn't happen + event_parts[event_parts.index(old_part)] = new_part + except ValueError: + continue + + # If there's dots in the name, it gets more complicated. Now we + # have to replace multiple sections of the original event. 
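+            # For example (illustrative, assuming an alias such as
+            # 'api.sagemaker' -> 'sagemaker' in EVENT_ALIASES):
+            # 'before-call.api.sagemaker.CreateEndpoint' would become
+            # 'before-call.sagemaker.CreateEndpoint'.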
+            elif old_part in event_name:
+                old_parts = old_part.split('.')
+                self._replace_subsection(event_parts, old_parts, new_part)
+            else:
+                continue
+
+            new_name = '.'.join(event_parts)
+            logger.debug(
+                "Changing event name from %s to %s", event_name, new_name
+            )
+            self._alias_name_cache[event_name] = new_name
+            return new_name
+
+        self._alias_name_cache[event_name] = event_name
+        return event_name
+
+    def _replace_subsection(self, sections, old_parts, new_part):
+        for i in range(len(sections)):
+            if (
+                sections[i] == old_parts[0]
+                and sections[i : i + len(old_parts)] == old_parts
+            ):
+                sections[i : i + len(old_parts)] = [new_part]
+                return
+
+    def __copy__(self):
+        return self.__class__(
+            copy.copy(self._emitter), copy.copy(self._event_aliases)
+        )
+
+
+class _PrefixTrie:
+    """Specialized prefix trie that handles wildcards.
+
+    The prefixes in this case are based on dot separated
+    names so 'foo.bar.baz' is::
+
+        foo -> bar -> baz
+
+    Wildcard support just means that having a key such as 'foo.bar.*.baz' will
+    be matched with a call to ``prefix_search(key='foo.bar.ANYTHING.baz')``.
+
+    You can think of this prefix trie as the equivalent of
+    defaultdict(list), except that it can do prefix searches:
+
+        foo.bar.baz -> A
+        foo.bar -> B
+        foo -> C
+
+    Calling ``prefix_search('foo.bar.baz')`` will return [A + B + C], from
+    most specific to least specific.
+
+    """
+
+    def __init__(self):
+        # Each dictionary can be thought of as a node, where a node
+        # has values associated with the node, and children is a link
+        # to more nodes.  So 'foo.bar' would have a 'foo' node with
+        # a 'bar' node as a child of foo.
+        # {'foo': {'children': {'bar': {...}}}}.
+        self._root = {'chunk': None, 'children': {}, 'values': None}
+
+    def append_item(self, key, value, section=_MIDDLE):
+        """Add an item to a key.
+
+        If a value is already associated with that key, the new
+        value is appended to the list for the key.
+        """
+        key_parts = key.split('.')
+        current = self._root
+        for part in key_parts:
+            if part not in current['children']:
+                new_child = {'chunk': part, 'values': None, 'children': {}}
+                current['children'][part] = new_child
+                current = new_child
+            else:
+                current = current['children'][part]
+        if current['values'] is None:
+            current['values'] = NodeList([], [], [])
+        current['values'][section].append(value)
+
+    def prefix_search(self, key):
+        """Collect all items that are prefixes of key.
+
+        Prefixes in this case are delineated by '.' characters, so
+        'foo.bar.baz' is a 3 chunk sequence of 3 "prefixes" (
+        "foo", "bar", and "baz").
+
+        """
+        collected = deque()
+        key_parts = key.split('.')
+        current = self._root
+        self._get_items(current, key_parts, collected, 0)
+        return collected
+
+    def _get_items(self, starting_node, key_parts, collected, starting_index):
+        stack = [(starting_node, starting_index)]
+        key_parts_len = len(key_parts)
+        # Traverse down the nodes, where at each level we add the
+        # next part from key_parts as well as the wildcard element '*'.
+        # This means for each node we see we potentially add two more
+        # elements to our stack.
+        while stack:
+            current_node, index = stack.pop()
+            if current_node['values']:
+                # We're using extendleft because we want
+                # the values associated with the node furthest
+                # from the root to come before nodes closer
+                # to the root.  extendleft() also adds its items
+                # in right-left order so .extendleft([1, 2, 3])
+                # will result in final_list = [3, 2, 1], which is
+                # why we reverse the lists.
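+                # Illustrative walk-through: with 'foo' -> C, 'foo.bar' -> B,
+                # and 'foo.bar.baz' -> A registered via append_item, a call
+                # to prefix_search('foo.bar.baz') collects deque([A, B, C]),
+                # most specific first.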
+ node_list = current_node['values'] + complete_order = ( + node_list.first + node_list.middle + node_list.last + ) + collected.extendleft(reversed(complete_order)) + if not index == key_parts_len: + children = current_node['children'] + directs = children.get(key_parts[index]) + wildcard = children.get('*') + next_index = index + 1 + if wildcard is not None: + stack.append((wildcard, next_index)) + if directs is not None: + stack.append((directs, next_index)) + + def remove_item(self, key, value): + """Remove an item associated with a key. + + If the value is not associated with the key a ``ValueError`` + will be raised. If the key does not exist in the trie, a + ``ValueError`` will be raised. + + """ + key_parts = key.split('.') + current = self._root + self._remove_item(current, key_parts, value, index=0) + + def _remove_item(self, current_node, key_parts, value, index): + if current_node is None: + return + elif index < len(key_parts): + next_node = current_node['children'].get(key_parts[index]) + if next_node is not None: + self._remove_item(next_node, key_parts, value, index + 1) + if index == len(key_parts) - 1: + node_list = next_node['values'] + if value in node_list.first: + node_list.first.remove(value) + elif value in node_list.middle: + node_list.middle.remove(value) + elif value in node_list.last: + node_list.last.remove(value) + if not next_node['children'] and not next_node['values']: + # Then this is a leaf node with no values so + # we can just delete this link from the parent node. + # This makes subsequent search faster in the case + # where a key does not exist. + del current_node['children'][key_parts[index]] + else: + raise ValueError(f"key is not in trie: {'.'.join(key_parts)}") + + def __copy__(self): + # The fact that we're using a nested dict under the covers + # is an implementation detail, and the user shouldn't have + # to know that they'd normally need a deepcopy so we expose + # __copy__ instead of __deepcopy__. + new_copy = self.__class__() + copied_attrs = self._recursive_copy(self.__dict__) + new_copy.__dict__ = copied_attrs + return new_copy + + def _recursive_copy(self, node): + # We can't use copy.deepcopy because we actually only want to copy + # the structure of the trie, not the handlers themselves. + # Each node has a chunk, children, and values. + copied_node = {} + for key, value in node.items(): + if isinstance(value, NodeList): + copied_node[key] = copy.copy(value) + elif isinstance(value, dict): + copied_node[key] = self._recursive_copy(value) + else: + copied_node[key] = value + return copied_node diff --git a/py311/lib/python3.11/site-packages/botocore/httpchecksum.py b/py311/lib/python3.11/site-packages/botocore/httpchecksum.py new file mode 100644 index 0000000000000000000000000000000000000000..c4a68769650dc2a3cc16707b25ede3d4fedb8d7b --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/httpchecksum.py @@ -0,0 +1,574 @@ +# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +"""The interfaces in this module are not intended for public use. + +This module defines interfaces for applying checksums to HTTP requests within +the context of botocore. This involves both resolving the checksum to be used +based on client configuration and environment, as well as application of the +checksum to the request. +""" + +import base64 +import io +import logging +from binascii import crc32 +from hashlib import sha1, sha256 + +from botocore.compat import HAS_CRT, has_minimum_crt_version, urlparse +from botocore.exceptions import ( + AwsChunkedWrapperError, + FlexibleChecksumError, + MissingDependencyException, +) +from botocore.model import StructureShape +from botocore.response import StreamingBody +from botocore.useragent import register_feature_id +from botocore.utils import ( + conditionally_calculate_md5, + determine_content_length, + has_checksum_header, +) + +if HAS_CRT: + from awscrt import checksums as crt_checksums +else: + crt_checksums = None + +logger = logging.getLogger(__name__) + +DEFAULT_CHECKSUM_ALGORITHM = "CRC32" + + +class BaseChecksum: + _CHUNK_SIZE = 1024 * 1024 + + def update(self, chunk): + pass + + def digest(self): + pass + + def b64digest(self): + bs = self.digest() + return base64.b64encode(bs).decode("ascii") + + def _handle_fileobj(self, fileobj): + start_position = fileobj.tell() + for chunk in iter(lambda: fileobj.read(self._CHUNK_SIZE), b""): + self.update(chunk) + fileobj.seek(start_position) + + def handle(self, body): + if isinstance(body, (bytes, bytearray)): + self.update(body) + else: + self._handle_fileobj(body) + return self.b64digest() + + +class Crc32Checksum(BaseChecksum): + def __init__(self): + self._int_crc32 = 0 + + def update(self, chunk): + self._int_crc32 = crc32(chunk, self._int_crc32) & 0xFFFFFFFF + + def digest(self): + return self._int_crc32.to_bytes(4, byteorder="big") + + +class CrtCrc32Checksum(BaseChecksum): + # Note: This class is only used if the CRT is available + def __init__(self): + self._int_crc32 = 0 + + def update(self, chunk): + new_checksum = crt_checksums.crc32(chunk, self._int_crc32) + self._int_crc32 = new_checksum & 0xFFFFFFFF + + def digest(self): + return self._int_crc32.to_bytes(4, byteorder="big") + + +class CrtCrc32cChecksum(BaseChecksum): + # Note: This class is only used if the CRT is available + def __init__(self): + self._int_crc32c = 0 + + def update(self, chunk): + new_checksum = crt_checksums.crc32c(chunk, self._int_crc32c) + self._int_crc32c = new_checksum & 0xFFFFFFFF + + def digest(self): + return self._int_crc32c.to_bytes(4, byteorder="big") + + +class CrtCrc64NvmeChecksum(BaseChecksum): + # Note: This class is only used if the CRT is available + def __init__(self): + self._int_crc64nvme = 0 + + def update(self, chunk): + new_checksum = crt_checksums.crc64nvme(chunk, self._int_crc64nvme) + self._int_crc64nvme = new_checksum & 0xFFFFFFFFFFFFFFFF + + def digest(self): + return self._int_crc64nvme.to_bytes(8, byteorder="big") + + +class Sha1Checksum(BaseChecksum): + def __init__(self): + self._checksum = sha1() + + def update(self, chunk): + self._checksum.update(chunk) + + def digest(self): + return self._checksum.digest() + + +class Sha256Checksum(BaseChecksum): + def __init__(self): + self._checksum = sha256() + + def update(self, chunk): + self._checksum.update(chunk) + + def digest(self): + return self._checksum.digest() + + +class AwsChunkedWrapper: + _DEFAULT_CHUNK_SIZE = 1024 * 1024 + + def __init__( + self, + raw, + checksum_cls=None, + checksum_name="x-amz-checksum", + 
chunk_size=None, + ): + self._raw = raw + self._checksum_name = checksum_name + self._checksum_cls = checksum_cls + self._reset() + + if chunk_size is None: + chunk_size = self._DEFAULT_CHUNK_SIZE + self._chunk_size = chunk_size + + def _reset(self): + self._remaining = b"" + self._complete = False + self._checksum = None + if self._checksum_cls: + self._checksum = self._checksum_cls() + + def seek(self, offset, whence=0): + if offset != 0 or whence != 0: + raise AwsChunkedWrapperError( + error_msg="Can only seek to start of stream" + ) + self._reset() + self._raw.seek(0) + + def read(self, size=None): + # Normalize "read all" size values to None + if size is not None and size <= 0: + size = None + + # If the underlying body is done and we have nothing left then + # end the stream + if self._complete and not self._remaining: + return b"" + + # While we're not done and want more bytes + want_more_bytes = size is None or size > len(self._remaining) + while not self._complete and want_more_bytes: + self._remaining += self._make_chunk() + want_more_bytes = size is None or size > len(self._remaining) + + # If size was None, we want to return everything + if size is None: + size = len(self._remaining) + + # Return a chunk up to the size asked for + to_return = self._remaining[:size] + self._remaining = self._remaining[size:] + return to_return + + def _make_chunk(self): + # NOTE: Chunk size is not deterministic as read could return less. This + # means we cannot know the content length of the encoded aws-chunked + # stream ahead of time without ensuring a consistent chunk size + raw_chunk = self._raw.read(self._chunk_size) + hex_len = hex(len(raw_chunk))[2:].encode("ascii") + self._complete = not raw_chunk + + if self._checksum: + self._checksum.update(raw_chunk) + + if self._checksum and self._complete: + name = self._checksum_name.encode("ascii") + checksum = self._checksum.b64digest().encode("ascii") + return b"0\r\n%s:%s\r\n\r\n" % (name, checksum) + + return b"%s\r\n%s\r\n" % (hex_len, raw_chunk) + + def __iter__(self): + while not self._complete: + yield self._make_chunk() + + +class StreamingChecksumBody(StreamingBody): + def __init__(self, raw_stream, content_length, checksum, expected): + super().__init__(raw_stream, content_length) + self._checksum = checksum + self._expected = expected + + def read(self, amt=None): + chunk = super().read(amt=amt) + self._checksum.update(chunk) + if amt is None or (not chunk and amt > 0): + self._validate_checksum() + return chunk + + def readinto(self, b): + amount_read = super().readinto(b) + if amount_read == len(b): + view = b + else: + view = memoryview(b)[:amount_read] + self._checksum.update(view) + if amount_read == 0 and len(b) > 0: + self._validate_checksum() + return amount_read + + def _validate_checksum(self): + if self._checksum.digest() != base64.b64decode(self._expected): + error_msg = ( + f"Expected checksum {self._expected} did not match calculated " + f"checksum: {self._checksum.b64digest()}" + ) + raise FlexibleChecksumError(error_msg=error_msg) + + +def resolve_checksum_context(request, operation_model, params): + resolve_request_checksum_algorithm(request, operation_model, params) + resolve_response_checksum_algorithms(request, operation_model, params) + + +def resolve_request_checksum_algorithm( + request, + operation_model, + params, + supported_algorithms=None, +): + # If the header is already set by the customer, skip calculation + if has_checksum_header(request): + return + + checksum_context = 
request["context"].get("checksum", {}) + request_checksum_calculation = request["context"][ + "client_config" + ].request_checksum_calculation + http_checksum = operation_model.http_checksum + request_checksum_required = ( + operation_model.http_checksum_required + or http_checksum.get("requestChecksumRequired") + ) + algorithm_member = http_checksum.get("requestAlgorithmMember") + if algorithm_member and algorithm_member in params: + # If the client has opted into using flexible checksums and the + # request supports it, use that instead of checksum required + if supported_algorithms is None: + supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS + + algorithm_name = params[algorithm_member].lower() + if algorithm_name not in supported_algorithms: + if not HAS_CRT and algorithm_name in _CRT_CHECKSUM_ALGORITHMS: + raise MissingDependencyException( + msg=( + f"Using {algorithm_name.upper()} requires an " + "additional dependency. You will need to pip install " + "botocore[crt] before proceeding." + ) + ) + raise FlexibleChecksumError( + error_msg=f"Unsupported checksum algorithm: {algorithm_name}" + ) + elif request_checksum_required or ( + algorithm_member and request_checksum_calculation == "when_supported" + ): + # Don't use a default checksum for presigned requests. + if request["context"].get("is_presign_request"): + return + algorithm_name = DEFAULT_CHECKSUM_ALGORITHM.lower() + algorithm_member_header = _get_request_algorithm_member_header( + operation_model, request, algorithm_member + ) + if algorithm_member_header is not None: + checksum_context["request_algorithm_header"] = { + "name": algorithm_member_header, + "value": DEFAULT_CHECKSUM_ALGORITHM, + } + else: + return + + location_type = "header" + if ( + operation_model.has_streaming_input + and urlparse(request["url"]).scheme == "https" + ): + if request["context"]["client_config"].signature_version != 's3': + # Operations with streaming input must support trailers. + # We only support unsigned trailer checksums currently. As this + # disables payload signing we'll only use trailers over TLS. 
+ location_type = "trailer" + + algorithm = { + "algorithm": algorithm_name, + "in": location_type, + "name": f"x-amz-checksum-{algorithm_name}", + } + + checksum_context["request_algorithm"] = algorithm + request["context"]["checksum"] = checksum_context + + +def _get_request_algorithm_member_header( + operation_model, request, algorithm_member +): + """Get the name of the header targeted by the "requestAlgorithmMember".""" + operation_input_shape = operation_model.input_shape + if not isinstance(operation_input_shape, StructureShape): + return + + algorithm_member_shape = operation_input_shape.members.get( + algorithm_member + ) + + if algorithm_member_shape: + return algorithm_member_shape.serialization.get("name") + + +def apply_request_checksum(request): + checksum_context = request.get("context", {}).get("checksum", {}) + algorithm = checksum_context.get("request_algorithm") + + if not algorithm: + return + + if algorithm == "conditional-md5": + # Special case to handle the http checksum required trait + conditionally_calculate_md5(request) + elif algorithm["in"] == "header": + _apply_request_header_checksum(request) + elif algorithm["in"] == "trailer": + _apply_request_trailer_checksum(request) + else: + raise FlexibleChecksumError( + error_msg="Unknown checksum variant: {}".format(algorithm["in"]) + ) + if "request_algorithm_header" in checksum_context: + request_algorithm_header = checksum_context["request_algorithm_header"] + request["headers"][request_algorithm_header["name"]] = ( + request_algorithm_header["value"] + ) + + +def _apply_request_header_checksum(request): + checksum_context = request.get("context", {}).get("checksum", {}) + algorithm = checksum_context.get("request_algorithm") + location_name = algorithm["name"] + if location_name in request["headers"]: + # If the header is already set by the customer, skip calculation + return + checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"]) + digest = checksum_cls().handle(request["body"]) + request["headers"][location_name] = digest + _register_checksum_algorithm_feature_id(algorithm) + + +def _apply_request_trailer_checksum(request): + checksum_context = request.get("context", {}).get("checksum", {}) + algorithm = checksum_context.get("request_algorithm") + location_name = algorithm["name"] + checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"]) + + headers = request["headers"] + body = request["body"] + + if location_name in headers: + # If the header is already set by the customer, skip calculation + return + + headers["Transfer-Encoding"] = "chunked" + if "Content-Encoding" in headers: + # We need to preserve the existing content encoding and add + # aws-chunked as a new content encoding. + headers["Content-Encoding"] += ",aws-chunked" + else: + headers["Content-Encoding"] = "aws-chunked" + headers["X-Amz-Trailer"] = location_name + _register_checksum_algorithm_feature_id(algorithm) + + content_length = determine_content_length(body) + if content_length is not None: + # Send the decoded content length if we can determine it. Some + # services such as S3 may require the decoded content length + headers["X-Amz-Decoded-Content-Length"] = str(content_length) + + if "Content-Length" in headers: + del headers["Content-Length"] + logger.debug( + "Removing the Content-Length header since 'chunked' is specified for Transfer-Encoding." 
+ ) + + if isinstance(body, (bytes, bytearray)): + body = io.BytesIO(body) + + request["body"] = AwsChunkedWrapper( + body, + checksum_cls=checksum_cls, + checksum_name=location_name, + ) + + +def _register_checksum_algorithm_feature_id(algorithm): + checksum_algorithm_name = algorithm["algorithm"].upper() + if checksum_algorithm_name == "CRC64NVME": + checksum_algorithm_name = "CRC64" + checksum_algorithm_name_feature_id = ( + f"FLEXIBLE_CHECKSUMS_REQ_{checksum_algorithm_name}" + ) + register_feature_id(checksum_algorithm_name_feature_id) + + +def resolve_response_checksum_algorithms( + request, operation_model, params, supported_algorithms=None +): + http_checksum = operation_model.http_checksum + mode_member = http_checksum.get("requestValidationModeMember") + if mode_member and mode_member in params: + if supported_algorithms is None: + supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS + response_algorithms = { + a.lower() for a in http_checksum.get("responseAlgorithms", []) + } + + usable_algorithms = [] + for algorithm in _ALGORITHMS_PRIORITY_LIST: + if algorithm not in response_algorithms: + continue + if algorithm in supported_algorithms: + usable_algorithms.append(algorithm) + + checksum_context = request["context"].get("checksum", {}) + checksum_context["response_algorithms"] = usable_algorithms + request["context"]["checksum"] = checksum_context + + +def handle_checksum_body(http_response, response, context, operation_model): + headers = response["headers"] + checksum_context = context.get("checksum", {}) + algorithms = checksum_context.get("response_algorithms") + + if not algorithms: + return + + for algorithm in algorithms: + header_name = f"x-amz-checksum-{algorithm}" + # If the header is not found, check the next algorithm + if header_name not in headers: + continue + + # If a - is in the checksum this is not valid Base64. S3 returns + # checksums that include a -# suffix to indicate a checksum derived + # from the hash of all part checksums. We cannot wrap this response + if "-" in headers[header_name]: + continue + + if operation_model.has_streaming_output: + response["body"] = _handle_streaming_response( + http_response, response, algorithm + ) + else: + response["body"] = _handle_bytes_response( + http_response, response, algorithm + ) + + # Expose metadata that the checksum check actually occurred + checksum_context = response["context"].get("checksum", {}) + checksum_context["response_algorithm"] = algorithm + response["context"]["checksum"] = checksum_context + return + + logger.debug( + 'Skipping checksum validation. 
Response did not contain one of the following algorithms: %s.', + algorithms, + ) + + +def _handle_streaming_response(http_response, response, algorithm): + checksum_cls = _CHECKSUM_CLS.get(algorithm) + header_name = f"x-amz-checksum-{algorithm}" + return StreamingChecksumBody( + http_response.raw, + response["headers"].get("content-length"), + checksum_cls(), + response["headers"][header_name], + ) + + +def _handle_bytes_response(http_response, response, algorithm): + body = http_response.content + header_name = f"x-amz-checksum-{algorithm}" + checksum_cls = _CHECKSUM_CLS.get(algorithm) + checksum = checksum_cls() + checksum.update(body) + expected = response["headers"][header_name] + if checksum.digest() != base64.b64decode(expected): + error_msg = ( + f"Expected checksum {expected} did not match calculated " + f"checksum: {checksum.b64digest()}" + ) + raise FlexibleChecksumError(error_msg=error_msg) + return body + + +_CHECKSUM_CLS = { + "crc32": Crc32Checksum, + "sha1": Sha1Checksum, + "sha256": Sha256Checksum, +} +_CRT_CHECKSUM_ALGORITHMS = ["crc32", "crc32c", "crc64nvme"] +if HAS_CRT: + # Use CRT checksum implementations if available + _CRT_CHECKSUM_CLS = { + "crc32": CrtCrc32Checksum, + "crc32c": CrtCrc32cChecksum, + } + + if has_minimum_crt_version((0, 23, 4)): + # CRC64NVME support wasn't officially added until 0.23.4 + _CRT_CHECKSUM_CLS["crc64nvme"] = CrtCrc64NvmeChecksum + + _CHECKSUM_CLS.update(_CRT_CHECKSUM_CLS) + # Validate this list isn't out of sync with _CRT_CHECKSUM_CLS keys + assert all( + name in _CRT_CHECKSUM_ALGORITHMS for name in _CRT_CHECKSUM_CLS.keys() + ) +_SUPPORTED_CHECKSUM_ALGORITHMS = list(_CHECKSUM_CLS.keys()) +_ALGORITHMS_PRIORITY_LIST = ['crc64nvme', 'crc32c', 'crc32', 'sha1', 'sha256'] diff --git a/py311/lib/python3.11/site-packages/botocore/httpsession.py b/py311/lib/python3.11/site-packages/botocore/httpsession.py new file mode 100644 index 0000000000000000000000000000000000000000..6d55e0ef7406186c59c956ffc7fa505f312402d5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/httpsession.py @@ -0,0 +1,512 @@ +import logging +import os +import os.path +import socket +import sys +import warnings +from base64 import b64encode +from concurrent.futures import CancelledError + +from urllib3 import PoolManager, Timeout, proxy_from_url +from urllib3.exceptions import ( + ConnectTimeoutError as URLLib3ConnectTimeoutError, +) +from urllib3.exceptions import ( + LocationParseError, + NewConnectionError, + ProtocolError, + ProxyError, +) +from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError +from urllib3.exceptions import SSLError as URLLib3SSLError +from urllib3.util.retry import Retry +from urllib3.util.ssl_ import ( + OP_NO_COMPRESSION, + PROTOCOL_TLS, + OP_NO_SSLv2, + OP_NO_SSLv3, + is_ipaddress, + ssl, +) +from urllib3.util.url import parse_url + +try: + from urllib3.util.ssl_ import OP_NO_TICKET, PROTOCOL_TLS_CLIENT +except ImportError: + # Fallback directly to ssl for version of urllib3 before 1.26. + # They are available in the standard library starting in Python 3.6. + from ssl import OP_NO_TICKET, PROTOCOL_TLS_CLIENT + +try: + # pyopenssl will be removed in urllib3 2.0, we'll fall back to ssl_ at that point. + # This can be removed once our urllib3 floor is raised to >= 2.0. 
+ with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=DeprecationWarning) + # Always import the original SSLContext, even if it has been patched + from urllib3.contrib.pyopenssl import ( + orig_util_SSLContext as SSLContext, + ) +except (AttributeError, ImportError): + from urllib3.util.ssl_ import SSLContext + +try: + from urllib3.util.ssl_ import DEFAULT_CIPHERS +except ImportError: + # Defer to system configuration starting with + # urllib3 2.0. This will choose the ciphers provided by + # Openssl 1.1.1+ or secure system defaults. + DEFAULT_CIPHERS = None + +import botocore.awsrequest +from botocore.compat import ( + IPV6_ADDRZ_RE, + ensure_bytes, + filter_ssl_warnings, + unquote, + urlparse, +) +from botocore.exceptions import ( + ConnectionClosedError, + ConnectTimeoutError, + EndpointConnectionError, + HTTPClientError, + InvalidProxiesConfigError, + ProxyConnectionError, + ReadTimeoutError, + SSLError, +) + +filter_ssl_warnings() +logger = logging.getLogger(__name__) +DEFAULT_TIMEOUT = 60 +MAX_POOL_CONNECTIONS = 10 +DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem') + +try: + from certifi import where +except ImportError: + + def where(): + return DEFAULT_CA_BUNDLE + + +def get_cert_path(verify): + if verify is not True: + return verify + + cert_path = where() + logger.debug("Certificate path: %s", cert_path) + + return cert_path + + +def create_urllib3_context( + ssl_version=None, cert_reqs=None, options=None, ciphers=None +): + """This function is a vendored version of the same function in urllib3 + + We vendor this function to ensure that the SSL contexts we construct + always use the std lib SSLContext instead of pyopenssl. + """ + # PROTOCOL_TLS is deprecated in Python 3.10 + if not ssl_version or ssl_version == PROTOCOL_TLS: + ssl_version = PROTOCOL_TLS_CLIENT + + context = SSLContext(ssl_version) + + if ciphers: + context.set_ciphers(ciphers) + elif DEFAULT_CIPHERS: + context.set_ciphers(DEFAULT_CIPHERS) + + # Setting the default here, as we may have no ssl module on import + cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs + + if options is None: + options = 0 + # SSLv2 is easily broken and is considered harmful and dangerous + options |= OP_NO_SSLv2 + # SSLv3 has several problems and is now dangerous + options |= OP_NO_SSLv3 + # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ + # (issue urllib3#309) + options |= OP_NO_COMPRESSION + # TLSv1.2 only. Unless set explicitly, do not request tickets. + # This may save some bandwidth on wire, and although the ticket is encrypted, + # there is a risk associated with it being on wire, + # if the server is not rotating its ticketing keys properly. + options |= OP_NO_TICKET + + context.options |= options + + # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is + # necessary for conditional client cert authentication with TLS 1.3. + # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older + # versions of Python. 
We only enable on Python 3.7.4+ or if certificate + # verification is enabled to work around Python issue #37428 + # See: https://bugs.python.org/issue37428 + if ( + cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4) + ) and getattr(context, "post_handshake_auth", None) is not None: + context.post_handshake_auth = True + + def disable_check_hostname(): + if ( + getattr(context, "check_hostname", None) is not None + ): # Platform-specific: Python 3.2 + # We do our own verification, including fingerprints and alternative + # hostnames. So disable it here + context.check_hostname = False + + # The order of the below lines setting verify_mode and check_hostname + # matter due to safe-guards SSLContext has to prevent an SSLContext with + # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more + # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used + # or not so we don't know the initial state of the freshly created SSLContext. + if cert_reqs == ssl.CERT_REQUIRED: + context.verify_mode = cert_reqs + disable_check_hostname() + else: + disable_check_hostname() + context.verify_mode = cert_reqs + + # Enable logging of TLS session keys via defacto standard environment variable + # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values. + if hasattr(context, "keylog_filename"): + sslkeylogfile = os.environ.get("SSLKEYLOGFILE") + if sslkeylogfile and not sys.flags.ignore_environment: + context.keylog_filename = sslkeylogfile + + return context + + +def ensure_boolean(val): + """Ensures a boolean value if a string or boolean is provided + + For strings, the value for True/False is case insensitive + """ + if isinstance(val, bool): + return val + else: + return val.lower() == 'true' + + +def mask_proxy_url(proxy_url): + """ + Mask proxy url credentials. + + :type proxy_url: str + :param proxy_url: The proxy url, i.e. https://username:password@proxy.com + + :return: Masked proxy url, i.e. https://***:***@proxy.com + """ + mask = '*' * 3 + parsed_url = urlparse(proxy_url) + if parsed_url.username: + proxy_url = proxy_url.replace(parsed_url.username, mask, 1) + if parsed_url.password: + proxy_url = proxy_url.replace(parsed_url.password, mask, 1) + return proxy_url + + +def _is_ipaddress(host): + """Wrap urllib3's is_ipaddress to support bracketed IPv6 addresses.""" + return is_ipaddress(host) or bool(IPV6_ADDRZ_RE.match(host)) + + +class ProxyConfiguration: + """Represents a proxy configuration dictionary and additional settings. + + This class represents a proxy configuration dictionary and provides utility + functions to retrieve well structured proxy urls and proxy headers from the + proxy configuration dictionary. 
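+
+    A usage sketch (hypothetical proxy values)::
+
+        config = ProxyConfiguration(
+            proxies={'https': 'user:pass@proxy.example.com:8080'})
+        config.proxy_url_for('https://s3.amazonaws.com')
+        # -> 'http://user:pass@proxy.example.com:8080'
+        config.proxy_headers_for('http://user:pass@proxy.example.com:8080')
+        # -> {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}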
+ """ + + def __init__(self, proxies=None, proxies_settings=None): + if proxies is None: + proxies = {} + if proxies_settings is None: + proxies_settings = {} + + self._proxies = proxies + self._proxies_settings = proxies_settings + + def proxy_url_for(self, url): + """Retrieves the corresponding proxy url for a given url.""" + parsed_url = urlparse(url) + proxy = self._proxies.get(parsed_url.scheme) + if proxy: + proxy = self._fix_proxy_url(proxy) + return proxy + + def proxy_headers_for(self, proxy_url): + """Retrieves the corresponding proxy headers for a given proxy url.""" + headers = {} + username, password = self._get_auth_from_url(proxy_url) + if username and password: + basic_auth = self._construct_basic_auth(username, password) + headers['Proxy-Authorization'] = basic_auth + return headers + + @property + def settings(self): + return self._proxies_settings + + def _fix_proxy_url(self, proxy_url): + if proxy_url.startswith('http:') or proxy_url.startswith('https:'): + return proxy_url + elif proxy_url.startswith('//'): + return 'http:' + proxy_url + else: + return 'http://' + proxy_url + + def _construct_basic_auth(self, username, password): + auth_str = f'{username}:{password}' + encoded_str = b64encode(auth_str.encode('ascii')).strip().decode() + return f'Basic {encoded_str}' + + def _get_auth_from_url(self, url): + parsed_url = urlparse(url) + try: + return unquote(parsed_url.username), unquote(parsed_url.password) + except (AttributeError, TypeError): + return None, None + + +class URLLib3Session: + """A basic HTTP client that supports connection pooling and proxies. + + This class is inspired by requests.adapters.HTTPAdapter, but has been + boiled down to meet the use cases needed by botocore. For the most part + this classes matches the functionality of HTTPAdapter in requests v2.7.0 + (the same as our vendored version). The only major difference of note is + that we currently do not support sending chunked requests. While requests + v2.7.0 implemented this themselves, later version urllib3 support this + directly via a flag to urlopen so enabling it if needed should be trivial. 
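+
+    A minimal usage sketch (illustrative values; ``request`` is assumed
+    to be a ``botocore.awsrequest.AWSPreparedRequest``)::
+
+        session = URLLib3Session(
+            timeout=(5, 60),  # (connect, read) timeouts in seconds
+            max_pool_connections=10,
+        )
+        response = session.send(request)  # botocore.awsrequest.AWSResponse
+        session.close()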
+ """ + + def __init__( + self, + verify=True, + proxies=None, + timeout=None, + max_pool_connections=MAX_POOL_CONNECTIONS, + socket_options=None, + client_cert=None, + proxies_config=None, + ): + self._verify = verify + self._proxy_config = ProxyConfiguration( + proxies=proxies, proxies_settings=proxies_config + ) + self._pool_classes_by_scheme = { + 'http': botocore.awsrequest.AWSHTTPConnectionPool, + 'https': botocore.awsrequest.AWSHTTPSConnectionPool, + } + if timeout is None: + timeout = DEFAULT_TIMEOUT + if not isinstance(timeout, (int, float)): + timeout = Timeout(connect=timeout[0], read=timeout[1]) + + self._cert_file = None + self._key_file = None + if isinstance(client_cert, str): + self._cert_file = client_cert + elif isinstance(client_cert, tuple): + self._cert_file, self._key_file = client_cert + + self._timeout = timeout + self._max_pool_connections = max_pool_connections + self._socket_options = socket_options + if socket_options is None: + self._socket_options = [] + self._proxy_managers = {} + self._manager = PoolManager(**self._get_pool_manager_kwargs()) + self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme + + def _proxies_kwargs(self, **kwargs): + proxies_settings = self._proxy_config.settings + proxies_kwargs = { + 'use_forwarding_for_https': proxies_settings.get( + 'proxy_use_forwarding_for_https' + ), + **kwargs, + } + return {k: v for k, v in proxies_kwargs.items() if v is not None} + + def _get_pool_manager_kwargs(self, **extra_kwargs): + pool_manager_kwargs = { + 'timeout': self._timeout, + 'maxsize': self._max_pool_connections, + 'ssl_context': self._get_ssl_context(), + 'socket_options': self._socket_options, + 'cert_file': self._cert_file, + 'key_file': self._key_file, + } + pool_manager_kwargs.update(**extra_kwargs) + return pool_manager_kwargs + + def _get_ssl_context(self): + return create_urllib3_context() + + def _get_proxy_manager(self, proxy_url): + if proxy_url not in self._proxy_managers: + proxy_headers = self._proxy_config.proxy_headers_for(proxy_url) + proxy_ssl_context = self._setup_proxy_ssl_context(proxy_url) + proxy_manager_kwargs = self._get_pool_manager_kwargs( + proxy_headers=proxy_headers + ) + proxy_manager_kwargs.update( + self._proxies_kwargs(proxy_ssl_context=proxy_ssl_context) + ) + proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs) + proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme + self._proxy_managers[proxy_url] = proxy_manager + + return self._proxy_managers[proxy_url] + + def _path_url(self, url): + parsed_url = urlparse(url) + path = parsed_url.path + if not path: + path = '/' + if parsed_url.query: + path = path + '?' 
+ parsed_url.query + return path + + def _setup_ssl_cert(self, conn, url, verify): + if url.lower().startswith('https') and verify: + conn.cert_reqs = 'CERT_REQUIRED' + conn.ca_certs = get_cert_path(verify) + else: + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + + def _setup_proxy_ssl_context(self, proxy_url): + proxies_settings = self._proxy_config.settings + proxy_ca_bundle = proxies_settings.get('proxy_ca_bundle') + proxy_cert = proxies_settings.get('proxy_client_cert') + if proxy_ca_bundle is None and proxy_cert is None: + return None + + context = self._get_ssl_context() + try: + url = parse_url(proxy_url) + # urllib3 disables this by default but we need it for proper + # proxy tls negotiation when proxy_url is not an IP Address + if not _is_ipaddress(url.host): + context.check_hostname = True + if proxy_ca_bundle is not None: + context.load_verify_locations(cafile=proxy_ca_bundle) + + if isinstance(proxy_cert, tuple): + context.load_cert_chain(proxy_cert[0], keyfile=proxy_cert[1]) + elif isinstance(proxy_cert, str): + context.load_cert_chain(proxy_cert) + + return context + except (OSError, URLLib3SSLError, LocationParseError) as e: + raise InvalidProxiesConfigError(error=e) + + def _get_connection_manager(self, url, proxy_url=None): + if proxy_url: + manager = self._get_proxy_manager(proxy_url) + else: + manager = self._manager + return manager + + def _get_request_target(self, url, proxy_url): + has_proxy = proxy_url is not None + + if not has_proxy: + return self._path_url(url) + + # HTTP proxies expect the request_target to be the absolute url to know + # which host to establish a connection to. urllib3 also supports + # forwarding for HTTPS through the 'use_forwarding_for_https' parameter. + proxy_scheme = urlparse(proxy_url).scheme + using_https_forwarding_proxy = ( + proxy_scheme == 'https' + and self._proxies_kwargs().get('use_forwarding_for_https', False) + ) + + if using_https_forwarding_proxy or url.startswith('http:'): + return url + else: + return self._path_url(url) + + def _chunked(self, headers): + transfer_encoding = headers.get('Transfer-Encoding', b'') + transfer_encoding = ensure_bytes(transfer_encoding) + return transfer_encoding.lower() == b'chunked' + + def close(self): + self._manager.clear() + for manager in self._proxy_managers.values(): + manager.clear() + + def send(self, request): + try: + proxy_url = self._proxy_config.proxy_url_for(request.url) + manager = self._get_connection_manager(request.url, proxy_url) + conn = manager.connection_from_url(request.url) + self._setup_ssl_cert(conn, request.url, self._verify) + if ensure_boolean( + os.environ.get('BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER', '') + ): + # This is currently an "experimental" feature which provides + # no guarantees of backwards compatibility. It may be subject + # to change or removal in any patch version. Anyone opting in + # to this feature should strictly pin botocore. 
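+                # (Illustrative) the opt-in is driven purely by the
+                # environment, e.g.:
+                #   export BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER=true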
+ host = urlparse(request.url).hostname + conn.proxy_headers['host'] = host + + request_target = self._get_request_target(request.url, proxy_url) + urllib_response = conn.urlopen( + method=request.method, + url=request_target, + body=request.body, + headers=request.headers, + retries=Retry(False), + assert_same_host=False, + preload_content=False, + decode_content=False, + chunked=self._chunked(request.headers), + ) + + http_response = botocore.awsrequest.AWSResponse( + request.url, + urllib_response.status, + urllib_response.headers, + urllib_response, + ) + + if not request.stream_output: + # Cause the raw stream to be exhausted immediately. We do it + # this way instead of using preload_content because + # preload_content will never buffer chunked responses + http_response.content + + return http_response + except URLLib3SSLError as e: + raise SSLError(endpoint_url=request.url, error=e) + except (NewConnectionError, socket.gaierror) as e: + raise EndpointConnectionError(endpoint_url=request.url, error=e) + except ProxyError as e: + raise ProxyConnectionError( + proxy_url=mask_proxy_url(proxy_url), error=e + ) + except URLLib3ConnectTimeoutError as e: + raise ConnectTimeoutError(endpoint_url=request.url, error=e) + except URLLib3ReadTimeoutError as e: + raise ReadTimeoutError(endpoint_url=request.url, error=e) + except ProtocolError as e: + raise ConnectionClosedError( + error=e, request=request, endpoint_url=request.url + ) + except CancelledError: + raise + except Exception as e: + message = 'Exception received when sending urllib3 HTTP request' + logger.debug(message, exc_info=True) + raise HTTPClientError(error=e) diff --git a/py311/lib/python3.11/site-packages/botocore/loaders.py b/py311/lib/python3.11/site-packages/botocore/loaders.py new file mode 100644 index 0000000000000000000000000000000000000000..f5072a3e5f508d321cb4ba18976494cbd9657e96 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/loaders.py @@ -0,0 +1,525 @@ +# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""Module for loading various model files. + +This module provides the classes that are used to load models used +by botocore. This can include: + + * Service models (e.g. the model for EC2, S3, DynamoDB, etc.) + * Service model extras which customize the service models + * Other models associated with a service (pagination, waiters) + * Non service-specific config (Endpoint data, retry config) + +Loading a module is broken down into several steps: + + * Determining the path to load + * Search the data_path for files to load + * The mechanics of loading the file + * Searching for extras and applying them to the loaded file + +The last item is used so that other faster loading mechanism +besides the default JSON loader can be used. 
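+
+For example, the usual entry point is ``create_loader`` (a short sketch;
+``s3`` is just an illustrative service name)::
+
+    from botocore.loaders import create_loader
+
+    loader = create_loader()
+    model = loader.load_service_model('s3', 'service-2')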
+ +The Search Path +=============== + +Similar to how the PATH environment variable is to finding executables +and the PYTHONPATH environment variable is to finding python modules +to import, the botocore loaders have the concept of a data path exposed +through AWS_DATA_PATH. + +This enables end users to provide additional search paths where we +will attempt to load models outside of the models we ship with +botocore. When you create a ``Loader``, there are two paths +automatically added to the model search path: + + * /data/ + * ~/.aws/models + +The first value is the path where all the model files shipped with +botocore are located. + +The second path is so that users can just drop new model files in +``~/.aws/models`` without having to mess around with the AWS_DATA_PATH. + +The AWS_DATA_PATH using the platform specific path separator to +separate entries (typically ``:`` on linux and ``;`` on windows). + + +Directory Layout +================ + +The Loader expects a particular directory layout. In order for any +directory specified in AWS_DATA_PATH to be considered, it must have +this structure for service models:: + + + | + |-- servicename1 + | |-- 2012-10-25 + | |-- service-2.json + |-- ec2 + | |-- 2014-01-01 + | | |-- paginators-1.json + | | |-- service-2.json + | | |-- waiters-2.json + | |-- 2015-03-01 + | |-- paginators-1.json + | |-- service-2.json + | |-- waiters-2.json + | |-- service-2.sdk-extras.json + + +That is: + + * The root directory contains sub directories that are the name + of the services. + * Within each service directory, there's a sub directory for each + available API version. + * Within each API version, there are model specific files, including + (but not limited to): service-2.json, waiters-2.json, paginators-1.json + +The ``-1`` and ``-2`` suffix at the end of the model files denote which version +schema is used within the model. Even though this information is available in +the ``version`` key within the model, this version is also part of the filename +so that code does not need to load the JSON model in order to determine which +version to use. + +The ``sdk-extras`` and similar files represent extra data that needs to be +applied to the model after it is loaded. Data in these files might represent +information that doesn't quite fit in the original models, but is still needed +for the sdk. For instance, additional operation parameters might be added here +which don't represent the actual service api. +""" + +import logging +import os + +from botocore import BOTOCORE_ROOT +from botocore.compat import HAS_GZIP, OrderedDict, json +from botocore.exceptions import DataNotFoundError, UnknownServiceError +from botocore.utils import deep_merge + +_JSON_OPEN_METHODS = { + '.json': open, +} + + +if HAS_GZIP: + from gzip import open as gzip_open + + _JSON_OPEN_METHODS['.json.gz'] = gzip_open + + +logger = logging.getLogger(__name__) + + +def instance_cache(func): + """Cache the result of a method on a per instance basis. + + This is not a general purpose caching decorator. In order + for this to be used, it must be used on methods on an + instance, and that instance *must* provide a + ``self._cache`` dictionary. + + """ + + def _wrapper(self, *args, **kwargs): + key = (func.__name__,) + args + for pair in sorted(kwargs.items()): + key += pair + if key in self._cache: + return self._cache[key] + data = func(self, *args, **kwargs) + self._cache[key] = data + return data + + return _wrapper + + +class JSONFileLoader: + """Loader JSON files. 
+ + This class can load the default format of models, which is a JSON file. + + """ + + def exists(self, file_path): + """Checks if the file exists. + + :type file_path: str + :param file_path: The full path to the file to load without + the '.json' extension. + + :return: True if file path exists, False otherwise. + + """ + for ext in _JSON_OPEN_METHODS: + if os.path.isfile(file_path + ext): + return True + return False + + def _load_file(self, full_path, open_method): + if not os.path.isfile(full_path): + return + + # By default the file will be opened with locale encoding on Python 3. + # We specify "utf8" here to ensure the correct behavior. + with open_method(full_path, 'rb') as fp: + payload = fp.read().decode('utf-8') + + logger.debug("Loading JSON file: %s", full_path) + return json.loads(payload, object_pairs_hook=OrderedDict) + + def load_file(self, file_path): + """Attempt to load the file path. + + :type file_path: str + :param file_path: The full path to the file to load without + the '.json' extension. + + :return: The loaded data if it exists, otherwise None. + + """ + for ext, open_method in _JSON_OPEN_METHODS.items(): + data = self._load_file(file_path + ext, open_method) + if data is not None: + return data + return None + + +def create_loader(search_path_string=None): + """Create a Loader class. + + This factory function creates a loader given a search string path. + + :type search_string_path: str + :param search_string_path: The AWS_DATA_PATH value. A string + of data path values separated by the ``os.path.pathsep`` value, + which is typically ``:`` on POSIX platforms and ``;`` on + windows. + + :return: A ``Loader`` instance. + + """ + if search_path_string is None: + return Loader() + paths = [] + extra_paths = search_path_string.split(os.pathsep) + for path in extra_paths: + path = os.path.expanduser(os.path.expandvars(path)) + paths.append(path) + return Loader(extra_search_paths=paths) + + +class Loader: + """Find and load data models. + + This class will handle searching for and loading data models. + + The main method used here is ``load_service_model``, which is a + convenience method over ``load_data`` and ``determine_latest_version``. + + """ + + FILE_LOADER_CLASS = JSONFileLoader + # The included models in botocore/data/ that we ship with botocore. + BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data') + # For convenience we automatically add ~/.aws/models to the data path. + CUSTOMER_DATA_PATH = os.path.join( + os.path.expanduser('~'), '.aws', 'models' + ) + BUILTIN_EXTRAS_TYPES = ['sdk'] + + def __init__( + self, + extra_search_paths=None, + file_loader=None, + cache=None, + include_default_search_paths=True, + include_default_extras=True, + ): + self._cache = {} + if file_loader is None: + file_loader = self.FILE_LOADER_CLASS() + self.file_loader = file_loader + if extra_search_paths is not None: + self._search_paths = extra_search_paths + else: + self._search_paths = [] + if include_default_search_paths: + self._search_paths.extend( + [self.CUSTOMER_DATA_PATH, self.BUILTIN_DATA_PATH] + ) + + self._extras_types = [] + if include_default_extras: + self._extras_types.extend(self.BUILTIN_EXTRAS_TYPES) + + self._extras_processor = ExtrasProcessor() + + @property + def search_paths(self): + return self._search_paths + + @property + def extras_types(self): + return self._extras_types + + @instance_cache + def list_available_services(self, type_name): + """List all known services. + + This will traverse the search path and look for all known + services. 
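+
+        For example (illustrative output)::
+
+            loader.list_available_services('service-2')
+            # -> ['acm', 'apigateway', ..., 's3', 'sqs', ...]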
+ + :type type_name: str + :param type_name: The type of the service (service-2, + paginators-1, waiters-2, etc). This is needed because + the list of available services depends on the service + type. For example, the latest API version available for + a resource-1.json file may not be the latest API version + available for a services-2.json file. + + :return: A list of all services. The list of services will + be sorted. + + """ + services = set() + for possible_path in self._potential_locations(): + # Any directory in the search path is potentially a service. + # We'll collect any initial list of potential services, + # but we'll then need to further process these directories + # by searching for the corresponding type_name in each + # potential directory. + possible_services = [ + d + for d in os.listdir(possible_path) + if os.path.isdir(os.path.join(possible_path, d)) + ] + for service_name in possible_services: + full_dirname = os.path.join(possible_path, service_name) + api_versions = os.listdir(full_dirname) + for api_version in api_versions: + full_load_path = os.path.join( + full_dirname, api_version, type_name + ) + if self.file_loader.exists(full_load_path): + services.add(service_name) + break + return sorted(services) + + @instance_cache + def determine_latest_version(self, service_name, type_name): + """Find the latest API version available for a service. + + :type service_name: str + :param service_name: The name of the service. + + :type type_name: str + :param type_name: The type of the service (service-2, + paginators-1, waiters-2, etc). This is needed because + the latest API version available can depend on the service + type. For example, the latest API version available for + a resource-1.json file may not be the latest API version + available for a services-2.json file. + + :rtype: str + :return: The latest API version. If the service does not exist + or does not have any available API data, then a + ``DataNotFoundError`` exception will be raised. + + """ + return max(self.list_api_versions(service_name, type_name)) + + @instance_cache + def list_api_versions(self, service_name, type_name): + """List all API versions available for a particular service type + + :type service_name: str + :param service_name: The name of the service + + :type type_name: str + :param type_name: The type name for the service (i.e service-2, + paginators-1, etc.) + + :rtype: list + :return: A list of API version strings in sorted order. + + """ + known_api_versions = set() + for possible_path in self._potential_locations( + service_name, must_exist=True, is_dir=True + ): + for dirname in os.listdir(possible_path): + full_path = os.path.join(possible_path, dirname, type_name) + # Only add to the known_api_versions if the directory + # contains a service-2, paginators-1, etc. file corresponding + # to the type_name passed in. + if self.file_loader.exists(full_path): + known_api_versions.add(dirname) + if not known_api_versions: + raise DataNotFoundError(data_path=service_name) + return sorted(known_api_versions) + + @instance_cache + def load_service_model(self, service_name, type_name, api_version=None): + """Load a botocore service model + + This is the main method for loading botocore models (e.g. a service + model, pagination configs, waiter configs, etc.). + + :type service_name: str + :param service_name: The name of the service (e.g ``ec2``, ``s3``). + + :type type_name: str + :param type_name: The model type. 
Valid types include, but are not + limited to: ``service-2``, ``paginators-1``, ``waiters-2``. + + :type api_version: str + :param api_version: The API version to load. If this is not + provided, then the latest API version will be used. + + :type load_extras: bool + :param load_extras: Whether or not to load the tool extras which + contain additional data to be added to the model. + + :raises: UnknownServiceError if there is no known service with + the provided service_name. + + :raises: DataNotFoundError if no data could be found for the + service_name/type_name/api_version. + + :return: The loaded data, as a python type (e.g. dict, list, etc). + """ + # Wrapper around the load_data. This will calculate the path + # to call load_data with. + known_services = self.list_available_services(type_name) + if service_name not in known_services: + raise UnknownServiceError( + service_name=service_name, + known_service_names=', '.join(sorted(known_services)), + ) + if api_version is None: + api_version = self.determine_latest_version( + service_name, type_name + ) + full_path = os.path.join(service_name, api_version, type_name) + model = self.load_data(full_path) + + # Load in all the extras + extras_data = self._find_extras(service_name, type_name, api_version) + self._extras_processor.process(model, extras_data) + + return model + + def _find_extras(self, service_name, type_name, api_version): + """Creates an iterator over all the extras data.""" + for extras_type in self.extras_types: + extras_name = f'{type_name}.{extras_type}-extras' + full_path = os.path.join(service_name, api_version, extras_name) + + try: + yield self.load_data(full_path) + except DataNotFoundError: + pass + + @instance_cache + def load_data_with_path(self, name): + """Same as ``load_data`` but returns file path as second return value. + + :type name: str + :param name: The data path, i.e ``ec2/2015-03-01/service-2``. + + :return: Tuple of the loaded data and the path to the data file + where the data was loaded from. If no data could be found then a + DataNotFoundError is raised. + """ + for possible_path in self._potential_locations(name): + found = self.file_loader.load_file(possible_path) + if found is not None: + return found, possible_path + + # We didn't find anything that matched on any path. + raise DataNotFoundError(data_path=name) + + def load_data(self, name): + """Load data given a data path. + + This is a low level method that will search through the various + search paths until it's able to load a value. This is typically + only needed to load *non* model files (such as _endpoints and + _retry). If you need to load model files, you should prefer + ``load_service_model``. Use ``load_data_with_path`` to get the + data path of the data file as second return value. + + :type name: str + :param name: The data path, i.e ``ec2/2015-03-01/service-2``. + + :return: The loaded data. If no data could be found then + a DataNotFoundError is raised. + """ + data, _ = self.load_data_with_path(name) + return data + + def _potential_locations(self, name=None, must_exist=False, is_dir=False): + # Will give an iterator over the full path of potential locations + # according to the search path. 
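+        # For example (with the default search paths), a name of
+        # 'ec2/2015-03-01/service-2' yields
+        # '~/.aws/models/ec2/2015-03-01/service-2' first, then
+        # '<botocore root>/data/ec2/2015-03-01/service-2'.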
+ for path in self.search_paths: + if os.path.isdir(path): + full_path = path + if name is not None: + full_path = os.path.join(path, name) + if not must_exist: + yield full_path + else: + if is_dir and os.path.isdir(full_path): + yield full_path + elif os.path.exists(full_path): + yield full_path + + def is_builtin_path(self, path): + """Whether a given path is within the package's data directory. + + This method can be used together with load_data_with_path(name) + to determine if data has been loaded from a file bundled with the + package, as opposed to a file in a separate location. + + :type path: str + :param path: The file path to check. + + :return: Whether the given path is within the package's data directory. + """ + path = os.path.expanduser(os.path.expandvars(path)) + return path.startswith(self.BUILTIN_DATA_PATH) + + +class ExtrasProcessor: + """Processes data from extras files into service models.""" + + def process(self, original_model, extra_models): + """Processes data from a list of loaded extras files into a model + + :type original_model: dict + :param original_model: The service model to load all the extras into. + + :type extra_models: iterable of dict + :param extra_models: A list of loaded extras models. + """ + for extras in extra_models: + self._process(original_model, extras) + + def _process(self, model, extra_model): + """Process a single extras model into a service model.""" + if 'merge' in extra_model: + deep_merge(model, extra_model['merge']) diff --git a/py311/lib/python3.11/site-packages/botocore/model.py b/py311/lib/python3.11/site-packages/botocore/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b198652df3d5da3c22f6410c101a2265d8ee81ba --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/model.py @@ -0,0 +1,1006 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""Abstractions to interact with service models.""" + +from collections import defaultdict +from typing import NamedTuple, Union + +from botocore.auth import resolve_auth_type +from botocore.compat import OrderedDict +from botocore.exceptions import ( + MissingServiceIdError, + UndefinedModelAttributeError, + UnsupportedServiceProtocolsError, +) +from botocore.utils import ( + PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, + CachedProperty, + hyphenize_service_id, + instance_cache, +) + +NOT_SET = object() + + +class NoShapeFoundError(Exception): + pass + + +class InvalidShapeError(Exception): + pass + + +class OperationNotFoundError(Exception): + pass + + +class InvalidShapeReferenceError(Exception): + pass + + +class ServiceId(str): + def hyphenize(self): + return hyphenize_service_id(self) + + +class Shape: + """Object representing a shape from the service model.""" + + # To simplify serialization logic, all shape params that are + # related to serialization are moved from the top level hash into + # a 'serialization' hash. This list below contains the names of all + # the attributes that should be moved. 
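+    # For example (illustrative member definition), a shape modeled as
+    #     {"type": "string", "locationName": "Bucket", "location": "uri"}
+    # surfaces as serialization == {'name': 'Bucket', 'location': 'uri'};
+    # note that 'locationName' is exposed as 'name'.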
+ SERIALIZED_ATTRS = [ + 'locationName', + 'queryName', + 'flattened', + 'location', + 'payload', + 'streaming', + 'timestampFormat', + 'xmlNamespace', + 'resultWrapper', + 'xmlAttribute', + 'eventstream', + 'event', + 'eventheader', + 'eventpayload', + 'jsonvalue', + 'timestampFormat', + 'hostLabel', + ] + METADATA_ATTRS = [ + 'required', + 'min', + 'max', + 'pattern', + 'sensitive', + 'enum', + 'idempotencyToken', + 'error', + 'exception', + 'endpointdiscoveryid', + 'retryable', + 'document', + 'union', + 'contextParam', + 'clientContextParams', + 'requiresLength', + ] + MAP_TYPE = OrderedDict + + def __init__(self, shape_name, shape_model, shape_resolver=None): + """ + + :type shape_name: string + :param shape_name: The name of the shape. + + :type shape_model: dict + :param shape_model: The shape model. This would be the value + associated with the key in the "shapes" dict of the + service model (i.e ``model['shapes'][shape_name]``) + + :type shape_resolver: botocore.model.ShapeResolver + :param shape_resolver: A shape resolver object. This is used to + resolve references to other shapes. For scalar shape types + (string, integer, boolean, etc.), this argument is not + required. If a shape_resolver is not provided for a complex + type, then a ``ValueError`` will be raised when an attempt + to resolve a shape is made. + + """ + self.name = shape_name + self.type_name = shape_model['type'] + self.documentation = shape_model.get('documentation', '') + self._shape_model = shape_model + if shape_resolver is None: + # If a shape_resolver is not provided, we create an object + # that will throw errors if you attempt to resolve + # a shape. This is actually ok for scalar shapes + # because they don't need to resolve shapes and shouldn't + # be required to provide an object they won't use. + shape_resolver = UnresolvableShapeMap() + self._shape_resolver = shape_resolver + self._cache = {} + + @CachedProperty + def serialization(self): + """Serialization information about the shape. + + This contains information that may be needed for input serialization + or response parsing. This can include: + + * name + * queryName + * flattened + * location + * payload + * streaming + * xmlNamespace + * resultWrapper + * xmlAttribute + * jsonvalue + * timestampFormat + + :rtype: dict + :return: Serialization information about the shape. + + """ + model = self._shape_model + serialization = {} + for attr in self.SERIALIZED_ATTRS: + if attr in self._shape_model: + serialization[attr] = model[attr] + # For consistency, locationName is renamed to just 'name'. + if 'locationName' in serialization: + serialization['name'] = serialization.pop('locationName') + return serialization + + @CachedProperty + def metadata(self): + """Metadata about the shape. + + This requires optional information about the shape, including: + + * min + * max + * pattern + * enum + * sensitive + * required + * idempotencyToken + * document + * union + * contextParam + * clientContextParams + * requiresLength + + :rtype: dict + :return: Metadata about the shape. + + """ + model = self._shape_model + metadata = {} + for attr in self.METADATA_ATTRS: + if attr in self._shape_model: + metadata[attr] = model[attr] + return metadata + + @CachedProperty + def required_members(self): + """A list of members that are required. + + A structure shape can define members that are required. + This value will return a list of required members. If there + are no required members an empty list is returned. 
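+
+        For example (illustrative)::
+
+            >>> shape.required_members
+            ['Bucket', 'Key']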
+ + """ + return self.metadata.get('required', []) + + def _resolve_shape_ref(self, shape_ref): + return self._shape_resolver.resolve_shape_ref(shape_ref) + + def __repr__(self): + return f"<{self.__class__.__name__}({self.name})>" + + @property + def event_stream_name(self): + return None + + +class StructureShape(Shape): + @CachedProperty + def members(self): + members = self._shape_model.get('members', self.MAP_TYPE()) + # The members dict looks like: + # 'members': { + # 'MemberName': {'shape': 'shapeName'}, + # 'MemberName2': {'shape': 'shapeName'}, + # } + # We return a dict of member name to Shape object. + shape_members = self.MAP_TYPE() + for name, shape_ref in members.items(): + shape_members[name] = self._resolve_shape_ref(shape_ref) + return shape_members + + @CachedProperty + def event_stream_name(self): + for member_name, member in self.members.items(): + if member.serialization.get('eventstream'): + return member_name + return None + + @CachedProperty + def error_code(self): + if not self.metadata.get('exception', False): + return None + error_metadata = self.metadata.get("error", {}) + code = error_metadata.get("code") + if code: + return code + # Use the exception name if there is no explicit code modeled + return self.name + + @CachedProperty + def is_document_type(self): + return self.metadata.get('document', False) + + @CachedProperty + def is_tagged_union(self): + return self.metadata.get('union', False) + + +class ListShape(Shape): + @CachedProperty + def member(self): + return self._resolve_shape_ref(self._shape_model['member']) + + +class MapShape(Shape): + @CachedProperty + def key(self): + return self._resolve_shape_ref(self._shape_model['key']) + + @CachedProperty + def value(self): + return self._resolve_shape_ref(self._shape_model['value']) + + +class StringShape(Shape): + @CachedProperty + def enum(self): + return self.metadata.get('enum', []) + + +class StaticContextParameter(NamedTuple): + name: str + value: Union[bool, str] + + +class ContextParameter(NamedTuple): + name: str + member_name: str + + +class ClientContextParameter(NamedTuple): + name: str + type: str + documentation: str + + +class ServiceModel: + """ + + :ivar service_description: The parsed service description dictionary. + + """ + + def __init__(self, service_description, service_name=None): + """ + + :type service_description: dict + :param service_description: The service description model. This value + is obtained from a botocore.loader.Loader, or from directly loading + the file yourself:: + + service_description = json.load( + open('/path/to/service-description-model.json')) + model = ServiceModel(service_description) + + :type service_name: str + :param service_name: The name of the service. Normally this is + the endpoint prefix defined in the service_description. However, + you can override this value to provide a more convenient name. + This is done in a few places in botocore (ses instead of email, + emr instead of elasticmapreduce). If this value is not provided, + it will default to the endpointPrefix defined in the model. + + """ + self._service_description = service_description + # We want clients to be able to access metadata directly. 
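+        # The metadata dict typically includes keys such as 'apiVersion',
+        # 'endpointPrefix', 'protocol', and 'serviceId' (an illustrative,
+        # not exhaustive, subset).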
+ self.metadata = service_description.get('metadata', {}) + self._shape_resolver = ShapeResolver( + service_description.get('shapes', {}) + ) + self._signature_version = NOT_SET + self._service_name = service_name + self._instance_cache = {} + + def shape_for(self, shape_name, member_traits=None): + return self._shape_resolver.get_shape_by_name( + shape_name, member_traits + ) + + def shape_for_error_code(self, error_code): + return self._error_code_cache.get(error_code, None) + + @CachedProperty + def _error_code_cache(self): + error_code_cache = {} + for error_shape in self.error_shapes: + code = error_shape.error_code + error_code_cache[code] = error_shape + return error_code_cache + + def resolve_shape_ref(self, shape_ref): + return self._shape_resolver.resolve_shape_ref(shape_ref) + + @CachedProperty + def shape_names(self): + return list(self._service_description.get('shapes', {})) + + @CachedProperty + def error_shapes(self): + error_shapes = [] + for shape_name in self.shape_names: + error_shape = self.shape_for(shape_name) + if error_shape.metadata.get('exception', False): + error_shapes.append(error_shape) + return error_shapes + + @instance_cache + def operation_model(self, operation_name): + try: + model = self._service_description['operations'][operation_name] + except KeyError: + raise OperationNotFoundError(operation_name) + return OperationModel(model, self, operation_name) + + @CachedProperty + def documentation(self): + return self._service_description.get('documentation', '') + + @CachedProperty + def operation_names(self): + return list(self._service_description.get('operations', [])) + + @CachedProperty + def service_name(self): + """The name of the service. + + This defaults to the endpointPrefix defined in the service model. + However, this value can be overriden when a ``ServiceModel`` is + created. If a service_name was not provided when the ``ServiceModel`` + was created and if there is no endpointPrefix defined in the + service model, then an ``UndefinedModelAttributeError`` exception + will be raised. + + """ + if self._service_name is not None: + return self._service_name + else: + return self.endpoint_prefix + + @CachedProperty + def service_id(self): + try: + return ServiceId(self._get_metadata_property('serviceId')) + except UndefinedModelAttributeError: + raise MissingServiceIdError(service_name=self._service_name) + + @CachedProperty + def signing_name(self): + """The name to use when computing signatures. + + If the model does not define a signing name, this + value will be the endpoint prefix defined in the model. 
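+
+        For example (illustrative), SES models an ``endpointPrefix`` of
+        ``email`` but declares ``ses`` as its ``signingName``, so requests
+        are signed using ``ses``.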
+ """ + signing_name = self.metadata.get('signingName') + if signing_name is None: + signing_name = self.endpoint_prefix + return signing_name + + @CachedProperty + def api_version(self): + return self._get_metadata_property('apiVersion') + + @CachedProperty + def protocol(self): + return self._get_metadata_property('protocol') + + @CachedProperty + def protocols(self): + return self._get_metadata_property('protocols') + + @CachedProperty + def resolved_protocol(self): + # We need to ensure `protocols` exists in the metadata before attempting to + # access it directly since referencing service_model.protocols directly will + # raise an UndefinedModelAttributeError if protocols is not defined + if self.metadata.get('protocols'): + for protocol in PRIORITY_ORDERED_SUPPORTED_PROTOCOLS: + if protocol in self.protocols: + return protocol + raise UnsupportedServiceProtocolsError( + botocore_supported_protocols=PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, + service_supported_protocols=self.protocols, + service=self.service_name, + ) + # If a service does not have a `protocols` trait, fall back to the legacy + # `protocol` trait + return self.protocol + + @CachedProperty + def endpoint_prefix(self): + return self._get_metadata_property('endpointPrefix') + + @CachedProperty + def endpoint_discovery_operation(self): + for operation in self.operation_names: + model = self.operation_model(operation) + if model.is_endpoint_discovery_operation: + return model + + @CachedProperty + def endpoint_discovery_required(self): + for operation in self.operation_names: + model = self.operation_model(operation) + if ( + model.endpoint_discovery is not None + and model.endpoint_discovery.get('required') + ): + return True + return False + + @CachedProperty + def client_context_parameters(self): + params = self._service_description.get('clientContextParams', {}) + return [ + ClientContextParameter( + name=param_name, + type=param_val['type'], + documentation=param_val['documentation'], + ) + for param_name, param_val in params.items() + ] + + def _get_metadata_property(self, name): + try: + return self.metadata[name] + except KeyError: + raise UndefinedModelAttributeError( + f'"{name}" not defined in the metadata of the model: {self}' + ) + + # Signature version is one of the rare properties + # that can be modified so a CachedProperty is not used here. + + @property + def signature_version(self): + if self._signature_version is NOT_SET: + signature_version = self.metadata.get('signatureVersion') + self._signature_version = signature_version + return self._signature_version + + @signature_version.setter + def signature_version(self, value): + self._signature_version = value + + @CachedProperty + def is_query_compatible(self): + return 'awsQueryCompatible' in self.metadata + + def __repr__(self): + return f'{self.__class__.__name__}({self.service_name})' + + +class OperationModel: + def __init__(self, operation_model, service_model, name=None): + """ + + :type operation_model: dict + :param operation_model: The operation model. This comes from the + service model, and is the value associated with the operation + name in the service model (i.e ``model['operations'][op_name]``). + + :type service_model: botocore.model.ServiceModel + :param service_model: The service model associated with the operation. + + :type name: string + :param name: The operation name. This is the operation name exposed to + the users of this model. 
This can potentially be different from + the "wire_name", which is the operation name that *must* by + provided over the wire. For example, given:: + + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2014_11_06", + ... + } + + The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``, + but the ``self.wire_name`` would be + ``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the + value we must send in the corresponding HTTP request. + + """ + self._operation_model = operation_model + self._service_model = service_model + self._api_name = name + # Clients can access '.name' to get the operation name + # and '.metadata' to get the top level metdata of the service. + self._wire_name = operation_model.get('name') + self.metadata = service_model.metadata + self.http = operation_model.get('http', {}) + + @CachedProperty + def name(self): + if self._api_name is not None: + return self._api_name + else: + return self.wire_name + + @property + def wire_name(self): + """The wire name of the operation. + + In many situations this is the same value as the + ``name``, value, but in some services, the operation name + exposed to the user is different from the operation name + we send across the wire (e.g cloudfront). + + Any serialization code should use ``wire_name``. + + """ + return self._operation_model.get('name') + + @property + def service_model(self): + return self._service_model + + @CachedProperty + def documentation(self): + return self._operation_model.get('documentation', '') + + @CachedProperty + def deprecated(self): + return self._operation_model.get('deprecated', False) + + @CachedProperty + def endpoint_discovery(self): + # Explicit None default. An empty dictionary for this trait means it is + # enabled but not required to be used. + return self._operation_model.get('endpointdiscovery', None) + + @CachedProperty + def is_endpoint_discovery_operation(self): + return self._operation_model.get('endpointoperation', False) + + @CachedProperty + def input_shape(self): + if 'input' not in self._operation_model: + # Some operations do not accept any input and do not define an + # input shape. + return None + return self._service_model.resolve_shape_ref( + self._operation_model['input'] + ) + + @CachedProperty + def output_shape(self): + if 'output' not in self._operation_model: + # Some operations do not define an output shape, + # in which case we return None to indicate the + # operation has no expected output. 
+ return None + return self._service_model.resolve_shape_ref( + self._operation_model['output'] + ) + + @CachedProperty + def idempotent_members(self): + input_shape = self.input_shape + if not input_shape: + return [] + + return [ + name + for (name, shape) in input_shape.members.items() + if 'idempotencyToken' in shape.metadata + and shape.metadata['idempotencyToken'] + ] + + @CachedProperty + def static_context_parameters(self): + params = self._operation_model.get('staticContextParams', {}) + return [ + StaticContextParameter(name=name, value=props.get('value')) + for name, props in params.items() + ] + + @CachedProperty + def context_parameters(self): + if not self.input_shape: + return [] + + return [ + ContextParameter( + name=shape.metadata['contextParam']['name'], + member_name=name, + ) + for name, shape in self.input_shape.members.items() + if 'contextParam' in shape.metadata + and 'name' in shape.metadata['contextParam'] + ] + + @CachedProperty + def operation_context_parameters(self): + return self._operation_model.get('operationContextParams', []) + + @CachedProperty + def request_compression(self): + return self._operation_model.get('requestcompression') + + @CachedProperty + def auth(self): + return self._operation_model.get('auth') + + @CachedProperty + def auth_type(self): + return self._operation_model.get('authtype') + + @CachedProperty + def resolved_auth_type(self): + if self.auth: + return resolve_auth_type(self.auth) + return self.auth_type + + @CachedProperty + def unsigned_payload(self): + return self._operation_model.get('unsignedPayload') + + @CachedProperty + def error_shapes(self): + shapes = self._operation_model.get("errors", []) + return list(self._service_model.resolve_shape_ref(s) for s in shapes) + + @CachedProperty + def endpoint(self): + return self._operation_model.get('endpoint') + + @CachedProperty + def http_checksum_required(self): + return self._operation_model.get('httpChecksumRequired', False) + + @CachedProperty + def http_checksum(self): + return self._operation_model.get('httpChecksum', {}) + + @CachedProperty + def has_event_stream_input(self): + return self.get_event_stream_input() is not None + + @CachedProperty + def has_event_stream_output(self): + return self.get_event_stream_output() is not None + + def get_event_stream_input(self): + return self._get_event_stream(self.input_shape) + + def get_event_stream_output(self): + return self._get_event_stream(self.output_shape) + + def _get_event_stream(self, shape): + """Returns the event stream member's shape if any or None otherwise.""" + if shape is None: + return None + event_name = shape.event_stream_name + if event_name: + return shape.members[event_name] + return None + + @CachedProperty + def has_streaming_input(self): + return self.get_streaming_input() is not None + + @CachedProperty + def has_streaming_output(self): + return self.get_streaming_output() is not None + + def get_streaming_input(self): + return self._get_streaming_body(self.input_shape) + + def get_streaming_output(self): + return self._get_streaming_body(self.output_shape) + + def _get_streaming_body(self, shape): + """Returns the streaming member's shape if any; or None otherwise.""" + if shape is None: + return None + payload = shape.serialization.get('payload') + if payload is not None: + payload_shape = shape.members[payload] + if payload_shape.type_name == 'blob': + return payload_shape + return None + + def __repr__(self): + return f'{self.__class__.__name__}(name={self.name})' + + +class ShapeResolver: + 
"""Resolves shape references.""" + + # Any type not in this mapping will default to the Shape class. + SHAPE_CLASSES = { + 'structure': StructureShape, + 'list': ListShape, + 'map': MapShape, + 'string': StringShape, + } + + def __init__(self, shape_map): + self._shape_map = shape_map + self._shape_cache = {} + + def get_shape_by_name(self, shape_name, member_traits=None): + try: + shape_model = self._shape_map[shape_name] + except KeyError: + raise NoShapeFoundError(shape_name) + try: + shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape) + except KeyError: + raise InvalidShapeError( + f"Shape is missing required key 'type': {shape_model}" + ) + if member_traits: + shape_model = shape_model.copy() + shape_model.update(member_traits) + result = shape_cls(shape_name, shape_model, self) + return result + + def resolve_shape_ref(self, shape_ref): + # A shape_ref is a dict that has a 'shape' key that + # refers to a shape name as well as any additional + # member traits that are then merged over the shape + # definition. For example: + # {"shape": "StringType", "locationName": "Foobar"} + if len(shape_ref) == 1 and 'shape' in shape_ref: + # It's just a shape ref with no member traits, we can avoid + # a .copy(). This is the common case so it's specifically + # called out here. + return self.get_shape_by_name(shape_ref['shape']) + else: + member_traits = shape_ref.copy() + try: + shape_name = member_traits.pop('shape') + except KeyError: + raise InvalidShapeReferenceError( + f"Invalid model, missing shape reference: {shape_ref}" + ) + return self.get_shape_by_name(shape_name, member_traits) + + +class UnresolvableShapeMap: + """A ShapeResolver that will throw ValueErrors when shapes are resolved.""" + + def get_shape_by_name(self, shape_name, member_traits=None): + raise ValueError( + f"Attempted to lookup shape '{shape_name}', but no shape map was provided." + ) + + def resolve_shape_ref(self, shape_ref): + raise ValueError( + f"Attempted to resolve shape '{shape_ref}', but no shape " + f"map was provided." + ) + + +class DenormalizedStructureBuilder: + """Build a StructureShape from a denormalized model. + + This is a convenience builder class that makes it easy to construct + ``StructureShape``s based on a denormalized model. + + It will handle the details of creating unique shape names and creating + the appropriate shape map needed by the ``StructureShape`` class. + + Example usage:: + + builder = DenormalizedStructureBuilder() + shape = builder.with_members({ + 'A': { + 'type': 'structure', + 'members': { + 'B': { + 'type': 'structure', + 'members': { + 'C': { + 'type': 'string', + } + } + } + } + } + }).build_model() + # ``shape`` is now an instance of botocore.model.StructureShape + + :type dict_type: class + :param dict_type: The dictionary type to use, allowing you to opt-in + to using OrderedDict or another dict type. This can + be particularly useful for testing when order + matters, such as for documentation. + + """ + + SCALAR_TYPES = ( + 'string', + 'integer', + 'boolean', + 'blob', + 'float', + 'timestamp', + 'long', + 'double', + 'char', + ) + + def __init__(self, name=None): + self.members = OrderedDict() + self._name_generator = ShapeNameGenerator() + if name is None: + self.name = self._name_generator.new_shape_name('structure') + + def with_members(self, members): + """ + + :type members: dict + :param members: The denormalized members. 
+ + :return: self + + """ + self._members = members + return self + + def build_model(self): + """Build the model based on the provided members. + + :rtype: botocore.model.StructureShape + :return: The built StructureShape object. + + """ + shapes = OrderedDict() + denormalized = { + 'type': 'structure', + 'members': self._members, + } + self._build_model(denormalized, shapes, self.name) + resolver = ShapeResolver(shape_map=shapes) + return StructureShape( + shape_name=self.name, + shape_model=shapes[self.name], + shape_resolver=resolver, + ) + + def _build_model(self, model, shapes, shape_name): + if model['type'] == 'structure': + shapes[shape_name] = self._build_structure(model, shapes) + elif model['type'] == 'list': + shapes[shape_name] = self._build_list(model, shapes) + elif model['type'] == 'map': + shapes[shape_name] = self._build_map(model, shapes) + elif model['type'] in self.SCALAR_TYPES: + shapes[shape_name] = self._build_scalar(model) + else: + raise InvalidShapeError(f"Unknown shape type: {model['type']}") + + def _build_structure(self, model, shapes): + members = OrderedDict() + shape = self._build_initial_shape(model) + shape['members'] = members + + for name, member_model in model.get('members', OrderedDict()).items(): + member_shape_name = self._get_shape_name(member_model) + members[name] = {'shape': member_shape_name} + self._build_model(member_model, shapes, member_shape_name) + return shape + + def _build_list(self, model, shapes): + member_shape_name = self._get_shape_name(model) + shape = self._build_initial_shape(model) + shape['member'] = {'shape': member_shape_name} + self._build_model(model['member'], shapes, member_shape_name) + return shape + + def _build_map(self, model, shapes): + key_shape_name = self._get_shape_name(model['key']) + value_shape_name = self._get_shape_name(model['value']) + shape = self._build_initial_shape(model) + shape['key'] = {'shape': key_shape_name} + shape['value'] = {'shape': value_shape_name} + self._build_model(model['key'], shapes, key_shape_name) + self._build_model(model['value'], shapes, value_shape_name) + return shape + + def _build_initial_shape(self, model): + shape = { + 'type': model['type'], + } + if 'documentation' in model: + shape['documentation'] = model['documentation'] + for attr in Shape.METADATA_ATTRS: + if attr in model: + shape[attr] = model[attr] + return shape + + def _build_scalar(self, model): + return self._build_initial_shape(model) + + def _get_shape_name(self, model): + if 'shape_name' in model: + return model['shape_name'] + else: + return self._name_generator.new_shape_name(model['type']) + + +class ShapeNameGenerator: + """Generate unique shape names for a type. + + This class can be used in conjunction with the DenormalizedStructureBuilder + to generate unique shape names for a given type. + + """ + + def __init__(self): + self._name_cache = defaultdict(int) + + def new_shape_name(self, type_name): + """Generate a unique shape name. + + This method will guarantee a unique shape name each time it is + called with the same type. + + :: + + >>> s = ShapeNameGenerator() + >>> s.new_shape_name('structure') + 'StructureType1' + >>> s.new_shape_name('structure') + 'StructureType2' + >>> s.new_shape_name('list') + 'ListType1' + >>> s.new_shape_name('list') + 'ListType2' + + + :type type_name: string + :param type_name: The type name (structure, list, map, string, etc.) 
+ + :rtype: string + :return: A unique shape name for the given type + + """ + self._name_cache[type_name] += 1 + current_index = self._name_cache[type_name] + return f'{type_name.capitalize()}Type{current_index}' diff --git a/py311/lib/python3.11/site-packages/botocore/monitoring.py b/py311/lib/python3.11/site-packages/botocore/monitoring.py new file mode 100644 index 0000000000000000000000000000000000000000..d57cf0ae6670e52c56d20e8f4835d711e82d2019 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/monitoring.py @@ -0,0 +1,586 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import json +import logging +import re +import time + +from botocore.compat import ensure_bytes, ensure_unicode, urlparse +from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS + +logger = logging.getLogger(__name__) + + +class Monitor: + _EVENTS_TO_REGISTER = [ + 'before-parameter-build', + 'request-created', + 'response-received', + 'after-call', + 'after-call-error', + ] + + def __init__(self, adapter, publisher): + """Abstraction for monitoring clients API calls + + :param adapter: An adapter that takes event emitter events + and produces monitor events + + :param publisher: A publisher for generated monitor events + """ + self._adapter = adapter + self._publisher = publisher + + def register(self, event_emitter): + """Register an event emitter to the monitor""" + for event_to_register in self._EVENTS_TO_REGISTER: + event_emitter.register_last(event_to_register, self.capture) + + def capture(self, event_name, **payload): + """Captures an incoming event from the event emitter + + It will feed an event emitter event to the monitor's adaptor to create + a monitor event and then publish that event to the monitor's publisher. 
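+
+        A sketch of the wiring (illustrative; ``publisher`` is assumed to
+        expose a ``publish(event)`` method)::
+
+            monitor = Monitor(MonitorEventAdapter(), publisher)
+            monitor.register(event_emitter)
+            # the emitter now routes the registered events into ``capture``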
+        """
+        try:
+            monitor_event = self._adapter.feed(event_name, payload)
+            if monitor_event:
+                self._publisher.publish(monitor_event)
+        except Exception as e:
+            logger.debug(
+                'Exception %s raised by client monitor in handling event %s',
+                e,
+                event_name,
+                exc_info=True,
+            )
+
+
+class MonitorEventAdapter:
+    def __init__(self, time=time.time):
+        """Adapts event emitter events to produce monitor events
+
+        :type time: callable
+        :param time: A callable that produces the current time
+        """
+        self._time = time
+
+    def feed(self, emitter_event_name, emitter_payload):
+        """Feed an event emitter event to generate a monitor event
+
+        :type emitter_event_name: str
+        :param emitter_event_name: The name of the event emitted
+
+        :type emitter_payload: dict
+        :param emitter_payload: The payload associated with the emitted
+            event
+
+        :rtype: BaseMonitorEvent
+        :returns: A monitor event based on the event emitter events
+            fired
+        """
+        return self._get_handler(emitter_event_name)(**emitter_payload)
+
+    def _get_handler(self, event_name):
+        return getattr(
+            self, '_handle_' + event_name.split('.')[0].replace('-', '_')
+        )
+
+    def _handle_before_parameter_build(self, model, context, **kwargs):
+        context['current_api_call_event'] = APICallEvent(
+            service=model.service_model.service_id,
+            operation=model.wire_name,
+            timestamp=self._get_current_time(),
+        )
+
+    def _handle_request_created(self, request, **kwargs):
+        context = request.context
+        new_attempt_event = context[
+            'current_api_call_event'
+        ].new_api_call_attempt(timestamp=self._get_current_time())
+        new_attempt_event.request_headers = request.headers
+        new_attempt_event.url = request.url
+        context['current_api_call_attempt_event'] = new_attempt_event
+
+    def _handle_response_received(
+        self, parsed_response, context, exception, **kwargs
+    ):
+        attempt_event = context.pop('current_api_call_attempt_event')
+        attempt_event.latency = self._get_latency(attempt_event)
+        if parsed_response is not None:
+            attempt_event.http_status_code = parsed_response[
+                'ResponseMetadata'
+            ]['HTTPStatusCode']
+            attempt_event.response_headers = parsed_response[
+                'ResponseMetadata'
+            ]['HTTPHeaders']
+            attempt_event.parsed_error = parsed_response.get('Error')
+        else:
+            attempt_event.wire_exception = exception
+        return attempt_event
+
+    def _handle_after_call(self, context, parsed, **kwargs):
+        context['current_api_call_event'].retries_exceeded = parsed[
+            'ResponseMetadata'
+        ].get('MaxAttemptsReached', False)
+        return self._complete_api_call(context)
+
+    def _handle_after_call_error(self, context, exception, **kwargs):
+        # If the after-call-error was emitted and the error being raised
+        # was a retryable connection error, then the retries must have been
+        # exceeded for that exception as this event gets emitted **after**
+        # retries happen.
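+        # Whether retries were exhausted therefore has to be inferred
+        # from the exception type here; there is no parsed response to
+        # consult on this error path.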
+        context[
+            'current_api_call_event'
+        ].retries_exceeded = self._is_retryable_exception(exception)
+        return self._complete_api_call(context)
+
+    def _is_retryable_exception(self, exception):
+        return isinstance(
+            exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR'])
+        )
+
+    def _complete_api_call(self, context):
+        call_event = context.pop('current_api_call_event')
+        call_event.latency = self._get_latency(call_event)
+        return call_event
+
+    def _get_latency(self, event):
+        return self._get_current_time() - event.timestamp
+
+    def _get_current_time(self):
+        return int(self._time() * 1000)
+
+
+class BaseMonitorEvent:
+    def __init__(self, service, operation, timestamp):
+        """Base monitor event
+
+        :type service: str
+        :param service: A string identifying the service associated to
+            the event
+
+        :type operation: str
+        :param operation: A string identifying the operation of service
+            associated to the event
+
+        :type timestamp: int
+        :param timestamp: Epoch time in milliseconds from when the event began
+        """
+        self.service = service
+        self.operation = operation
+        self.timestamp = timestamp
+
+    def __repr__(self):
+        return f'{self.__class__.__name__}({self.__dict__!r})'
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+
+class APICallEvent(BaseMonitorEvent):
+    def __init__(
+        self,
+        service,
+        operation,
+        timestamp,
+        latency=None,
+        attempts=None,
+        retries_exceeded=False,
+    ):
+        """Monitor event for a single API call
+
+        This event corresponds to a single client method call, which includes
+        every HTTP request attempt made in order to complete the client call
+
+        :type service: str
+        :param service: A string identifying the service associated to
+            the event
+
+        :type operation: str
+        :param operation: A string identifying the operation of service
+            associated to the event
+
+        :type timestamp: int
+        :param timestamp: Epoch time in milliseconds from when the event began
+
+        :type latency: int
+        :param latency: The time in milliseconds to complete the client call
+
+        :type attempts: list
+        :param attempts: The list of APICallAttempts associated to the
+            APICall
+
+        :type retries_exceeded: bool
+        :param retries_exceeded: True if API call exceeded retries. False
+            otherwise
+        """
+        super().__init__(
+            service=service, operation=operation, timestamp=timestamp
+        )
+        self.latency = latency
+        self.attempts = attempts
+        if attempts is None:
+            self.attempts = []
+        self.retries_exceeded = retries_exceeded
+
+    def new_api_call_attempt(self, timestamp):
+        """Instantiates APICallAttemptEvent associated to the APICallEvent
+
+        :type timestamp: int
+        :param timestamp: Epoch time in milliseconds to associate to the
+            APICallAttemptEvent
+        """
+        attempt_event = APICallAttemptEvent(
+            service=self.service, operation=self.operation, timestamp=timestamp
+        )
+        self.attempts.append(attempt_event)
+        return attempt_event
+
+
+class APICallAttemptEvent(BaseMonitorEvent):
+    def __init__(
+        self,
+        service,
+        operation,
+        timestamp,
+        latency=None,
+        url=None,
+        http_status_code=None,
+        request_headers=None,
+        response_headers=None,
+        parsed_error=None,
+        wire_exception=None,
+    ):
+        """Monitor event for a single API call attempt
+
+        This event corresponds to a single HTTP request attempt in completing
+        the entire client method call.
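+        For example, a client call that is retried twice produces a
+        single APICallEvent carrying three of these attempt events.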
+
+        :type service: str
+        :param service: A string identifying the service associated to
+            the event
+
+        :type operation: str
+        :param operation: A string identifying the operation of service
+            associated to the event
+
+        :type timestamp: int
+        :param timestamp: Epoch time in milliseconds from when the HTTP request
+            started
+
+        :type latency: int
+        :param latency: The time in milliseconds to complete the HTTP request
+            whether it succeeded or failed
+
+        :type url: str
+        :param url: The URL the attempt was sent to
+
+        :type http_status_code: int
+        :param http_status_code: The HTTP status code of the HTTP response
+            if there was a response
+
+        :type request_headers: dict
+        :param request_headers: The HTTP headers sent in making the HTTP
+            request
+
+        :type response_headers: dict
+        :param response_headers: The HTTP headers returned in the HTTP response
+            if there was a response
+
+        :type parsed_error: dict
+        :param parsed_error: The error parsed if the service returned an
+            error back
+
+        :type wire_exception: Exception
+        :param wire_exception: The exception raised in sending the HTTP
+            request (e.g. ConnectionError)
+        """
+        super().__init__(
+            service=service, operation=operation, timestamp=timestamp
+        )
+        self.latency = latency
+        self.url = url
+        self.http_status_code = http_status_code
+        self.request_headers = request_headers
+        self.response_headers = response_headers
+        self.parsed_error = parsed_error
+        self.wire_exception = wire_exception
+
+
+class CSMSerializer:
+    _MAX_CLIENT_ID_LENGTH = 255
+    _MAX_EXCEPTION_CLASS_LENGTH = 128
+    _MAX_ERROR_CODE_LENGTH = 128
+    _MAX_USER_AGENT_LENGTH = 256
+    _MAX_MESSAGE_LENGTH = 512
+    _RESPONSE_HEADERS_TO_EVENT_ENTRIES = {
+        'x-amzn-requestid': 'XAmznRequestId',
+        'x-amz-request-id': 'XAmzRequestId',
+        'x-amz-id-2': 'XAmzId2',
+    }
+    _AUTH_REGEXS = {
+        'v4': re.compile(
+            r'AWS4-HMAC-SHA256 '
+            r'Credential=(?P<access_key>\w+)/\d+/'
+            r'(?P<signing_region>[a-z0-9-]+)/'
+        ),
+        's3': re.compile(r'AWS (?P<access_key>\w+):'),
+    }
+    _SERIALIZEABLE_EVENT_PROPERTIES = [
+        'service',
+        'operation',
+        'timestamp',
+        'attempts',
+        'latency',
+        'retries_exceeded',
+        'url',
+        'request_headers',
+        'http_status_code',
+        'response_headers',
+        'parsed_error',
+        'wire_exception',
+    ]
+
+    def __init__(self, csm_client_id):
+        """Serializes monitor events to CSM (Client Side Monitoring) format
+
+        :type csm_client_id: str
+        :param csm_client_id: The application identifier to associate
+            to the serialized events
+        """
+        self._validate_client_id(csm_client_id)
+        self.csm_client_id = csm_client_id
+
+    def _validate_client_id(self, csm_client_id):
+        if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH:
+            raise ValueError(
+                f'The value provided for csm_client_id: {csm_client_id} exceeds '
+                f'the maximum length of {self._MAX_CLIENT_ID_LENGTH} characters'
+            )
+
+    def serialize(self, event):
+        """Serializes a monitor event to the CSM format
+
+        :type event: BaseMonitorEvent
+        :param event: The event to serialize to bytes
+
+        :rtype: bytes
+        :returns: The CSM serialized form of the event
+        """
+        event_dict = self._get_base_event_dict(event)
+        event_type = self._get_event_type(event)
+        event_dict['Type'] = event_type
+        for attr in self._SERIALIZEABLE_EVENT_PROPERTIES:
+            value = getattr(event, attr, None)
+            if value is not None:
+                getattr(self, '_serialize_' + attr)(
+                    value, event_dict, event_type=event_type
+                )
+        return ensure_bytes(json.dumps(event_dict, separators=(',', ':')))
+
+    def _get_base_event_dict(self, event):
+        return {
+            'Version': 1,
+            'ClientId': self.csm_client_id,
+        }
+
+    def _serialize_service(self,
service, event_dict, **kwargs): + event_dict['Service'] = service + + def _serialize_operation(self, operation, event_dict, **kwargs): + event_dict['Api'] = operation + + def _serialize_timestamp(self, timestamp, event_dict, **kwargs): + event_dict['Timestamp'] = timestamp + + def _serialize_attempts(self, attempts, event_dict, **kwargs): + event_dict['AttemptCount'] = len(attempts) + if attempts: + self._add_fields_from_last_attempt(event_dict, attempts[-1]) + + def _add_fields_from_last_attempt(self, event_dict, last_attempt): + if last_attempt.request_headers: + # It does not matter which attempt to use to grab the region + # for the ApiCall event, but SDKs typically do the last one. + region = self._get_region(last_attempt.request_headers) + if region is not None: + event_dict['Region'] = region + event_dict['UserAgent'] = self._get_user_agent( + last_attempt.request_headers + ) + if last_attempt.http_status_code is not None: + event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code + if last_attempt.parsed_error is not None: + self._serialize_parsed_error( + last_attempt.parsed_error, event_dict, 'ApiCall' + ) + if last_attempt.wire_exception is not None: + self._serialize_wire_exception( + last_attempt.wire_exception, event_dict, 'ApiCall' + ) + + def _serialize_latency(self, latency, event_dict, event_type): + if event_type == 'ApiCall': + event_dict['Latency'] = latency + elif event_type == 'ApiCallAttempt': + event_dict['AttemptLatency'] = latency + + def _serialize_retries_exceeded( + self, retries_exceeded, event_dict, **kwargs + ): + event_dict['MaxRetriesExceeded'] = 1 if retries_exceeded else 0 + + def _serialize_url(self, url, event_dict, **kwargs): + event_dict['Fqdn'] = urlparse(url).netloc + + def _serialize_request_headers( + self, request_headers, event_dict, **kwargs + ): + event_dict['UserAgent'] = self._get_user_agent(request_headers) + if self._is_signed(request_headers): + event_dict['AccessKey'] = self._get_access_key(request_headers) + region = self._get_region(request_headers) + if region is not None: + event_dict['Region'] = region + if 'X-Amz-Security-Token' in request_headers: + event_dict['SessionToken'] = request_headers[ + 'X-Amz-Security-Token' + ] + + def _serialize_http_status_code( + self, http_status_code, event_dict, **kwargs + ): + event_dict['HttpStatusCode'] = http_status_code + + def _serialize_response_headers( + self, response_headers, event_dict, **kwargs + ): + for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items(): + if header in response_headers: + event_dict[entry] = response_headers[header] + + def _serialize_parsed_error( + self, parsed_error, event_dict, event_type, **kwargs + ): + field_prefix = 'Final' if event_type == 'ApiCall' else '' + event_dict[field_prefix + 'AwsException'] = self._truncate( + parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH + ) + event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate( + parsed_error['Message'], self._MAX_MESSAGE_LENGTH + ) + + def _serialize_wire_exception( + self, wire_exception, event_dict, event_type, **kwargs + ): + field_prefix = 'Final' if event_type == 'ApiCall' else '' + event_dict[field_prefix + 'SdkException'] = self._truncate( + wire_exception.__class__.__name__, self._MAX_EXCEPTION_CLASS_LENGTH + ) + event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate( + str(wire_exception), self._MAX_MESSAGE_LENGTH + ) + + def _get_event_type(self, event): + if isinstance(event, APICallEvent): + return 'ApiCall' + elif isinstance(event, 
APICallAttemptEvent):
+            return 'ApiCallAttempt'
+
+    def _get_access_key(self, request_headers):
+        auth_val = self._get_auth_value(request_headers)
+        _, auth_match = self._get_auth_match(auth_val)
+        return auth_match.group('access_key')
+
+    def _get_region(self, request_headers):
+        if not self._is_signed(request_headers):
+            return None
+        auth_val = self._get_auth_value(request_headers)
+        signature_version, auth_match = self._get_auth_match(auth_val)
+        if signature_version != 'v4':
+            return None
+        return auth_match.group('signing_region')
+
+    def _get_user_agent(self, request_headers):
+        return self._truncate(
+            ensure_unicode(request_headers.get('User-Agent', '')),
+            self._MAX_USER_AGENT_LENGTH,
+        )
+
+    def _is_signed(self, request_headers):
+        return 'Authorization' in request_headers
+
+    def _get_auth_value(self, request_headers):
+        return ensure_unicode(request_headers['Authorization'])
+
+    def _get_auth_match(self, auth_val):
+        for signature_version, regex in self._AUTH_REGEXS.items():
+            match = regex.match(auth_val)
+            if match:
+                return signature_version, match
+        return None, None
+
+    def _truncate(self, text, max_length):
+        if len(text) > max_length:
+            logger.debug(
+                'Truncating following value to maximum length of %s: %s',
+                max_length,
+                text,
+            )
+            return text[:max_length]
+        return text
+
+
+class SocketPublisher:
+    _MAX_MONITOR_EVENT_LENGTH = 8 * 1024
+
+    def __init__(self, socket, host, port, serializer):
+        """Publishes monitor events to a socket
+
+        :type socket: socket.socket
+        :param socket: The socket object to use to publish events
+
+        :type host: string
+        :param host: The host to send events to
+
+        :type port: integer
+        :param port: The port on the host to send events to
+
+        :param serializer: The serializer to use to serialize the event
+            to a form that can be published to the socket. This must
+            have a `serialize()` method that accepts a monitor event
+            and returns bytes
+        """
+        self._socket = socket
+        self._address = (host, port)
+        self._serializer = serializer
+
+    def publish(self, event):
+        """Publishes a specified monitor event
+
+        :type event: BaseMonitorEvent
+        :param event: The monitor event to be sent
+            over the publisher's socket to the desired address.
+        """
+        serialized_event = self._serializer.serialize(event)
+        if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH:
+            logger.debug(
+                'Serialized event of size %s exceeds the maximum length '
+                'allowed: %s. Not sending event to socket.',
+                len(serialized_event),
+                self._MAX_MONITOR_EVENT_LENGTH,
+            )
+            return
+        self._socket.sendto(serialized_event, self._address)
diff --git a/py311/lib/python3.11/site-packages/botocore/paginate.py b/py311/lib/python3.11/site-packages/botocore/paginate.py
new file mode 100644
index 0000000000000000000000000000000000000000..63b4a29f809db5994397ca6c4b1310a100d32fed
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/paginate.py
@@ -0,0 +1,728 @@
+# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
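+# A minimal usage sketch, assuming an already-created client object
+# (the bucket name is illustrative); the classes in this module are
+# normally reached through a client's ``get_paginator`` method::
+#
+#     paginator = client.get_paginator('list_objects_v2')
+#     for page in paginator.paginate(Bucket='amzn-s3-demo-bucket'):
+#         print(page['KeyCount'])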
+
+import base64
+import json
+import logging
+from functools import partial
+from itertools import tee
+
+import jmespath
+
+from botocore.context import with_current_context
+from botocore.exceptions import PaginationError
+from botocore.useragent import register_feature_id
+from botocore.utils import merge_dicts, set_value_from_jmespath
+
+log = logging.getLogger(__name__)
+
+
+class TokenEncoder:
+    """Encodes dictionaries into opaque strings.
+
+    This is, for the most part, json dumps + base64 encoding, but it also
+    supports having bytes in the dictionary in addition to the types that
+    json can handle by default.
+
+    This is intended for use in encoding pagination tokens, which in some
+    cases can be complex structures and / or contain bytes.
+    """
+
+    def encode(self, token):
+        """Encodes a dictionary to an opaque string.
+
+        :type token: dict
+        :param token: A dictionary containing pagination information,
+            particularly the service pagination token(s) but also other boto
+            metadata.
+
+        :rtype: str
+        :returns: An opaque string
+        """
+        try:
+            # Try just using json dumps first to avoid having to traverse
+            # and encode the dict. In 99.9999% of cases this will work.
+            json_string = json.dumps(token)
+        except (TypeError, UnicodeDecodeError):
+            # If normal dumping failed, go through and base64 encode all bytes.
+            encoded_token, encoded_keys = self._encode(token, [])
+
+            # Save the list of all the encoded key paths. We can safely
+            # assume that no service will ever use this key.
+            encoded_token['boto_encoded_keys'] = encoded_keys
+
+            # Now that the bytes are all encoded, dump the json.
+            json_string = json.dumps(encoded_token)
+
+        # base64 encode the json string to produce an opaque token string.
+        return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
+
+    def _encode(self, data, path):
+        """Encode bytes in given data, keeping track of the path traversed."""
+        if isinstance(data, dict):
+            return self._encode_dict(data, path)
+        elif isinstance(data, list):
+            return self._encode_list(data, path)
+        elif isinstance(data, bytes):
+            return self._encode_bytes(data, path)
+        else:
+            return data, []
+
+    def _encode_list(self, data, path):
+        """Encode any bytes in a list, noting the index of what is encoded."""
+        new_data = []
+        encoded = []
+        for i, value in enumerate(data):
+            new_path = path + [i]
+            new_value, new_encoded = self._encode(value, new_path)
+            new_data.append(new_value)
+            encoded.extend(new_encoded)
+        return new_data, encoded
+
+    def _encode_dict(self, data, path):
+        """Encode any bytes in a dict, noting the index of what is encoded."""
+        new_data = {}
+        encoded = []
+        for key, value in data.items():
+            new_path = path + [key]
+            new_value, new_encoded = self._encode(value, new_path)
+            new_data[key] = new_value
+            encoded.extend(new_encoded)
+        return new_data, encoded
+
+    def _encode_bytes(self, data, path):
+        """Base64 encode a byte string."""
+        return base64.b64encode(data).decode('utf-8'), [path]
+
+
+class TokenDecoder:
+    """Decodes token strings back into dictionaries.
+
+    This performs the inverse operation to the TokenEncoder, accepting
+    opaque strings and decoding them into a usable form.
+    """
+
+    def decode(self, token):
+        """Decodes an opaque string to a dictionary.
+
+        :type token: str
+        :param token: A token string given by the botocore pagination
+            interface.
+
+        :rtype: dict
+        :returns: A dictionary containing pagination information,
+            particularly the service pagination token(s) but also other boto
+            metadata.
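+
+        For example, round-tripping a token through the encoder::
+
+            >>> token = TokenEncoder().encode({"Marker": "opaque"})
+            >>> TokenDecoder().decode(token)
+            {'Marker': 'opaque'}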
+ """ + json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8') + decoded_token = json.loads(json_string) + + # Remove the encoding metadata as it is read since it will no longer + # be needed. + encoded_keys = decoded_token.pop('boto_encoded_keys', None) + if encoded_keys is None: + return decoded_token + else: + return self._decode(decoded_token, encoded_keys) + + def _decode(self, token, encoded_keys): + """Find each encoded value and decode it.""" + for key in encoded_keys: + encoded = self._path_get(token, key) + decoded = base64.b64decode(encoded.encode('utf-8')) + self._path_set(token, key, decoded) + return token + + def _path_get(self, data, path): + """Return the nested data at the given path. + + For instance: + data = {'foo': ['bar', 'baz']} + path = ['foo', 0] + ==> 'bar' + """ + # jmespath isn't used here because it would be difficult to actually + # create the jmespath query when taking all of the unknowns of key + # structure into account. Gross though this is, it is simple and not + # very error prone. + d = data + for step in path: + d = d[step] + return d + + def _path_set(self, data, path, value): + """Set the value of a key in the given data. + + Example: + data = {'foo': ['bar', 'baz']} + path = ['foo', 1] + value = 'bin' + ==> data = {'foo': ['bar', 'bin']} + """ + container = self._path_get(data, path[:-1]) + container[path[-1]] = value + + +class PaginatorModel: + def __init__(self, paginator_config): + self._paginator_config = paginator_config['pagination'] + + def get_paginator(self, operation_name): + try: + single_paginator_config = self._paginator_config[operation_name] + except KeyError: + raise ValueError( + f"Paginator for operation does not exist: {operation_name}" + ) + return single_paginator_config + + +class PageIterator: + """An iterable object to paginate API results. + Please note it is NOT a python iterator. + Use ``iter`` to wrap this as a generator. 
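+
+    For example, assuming a paginator created via ``get_paginator``::
+
+        page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket')
+        pages = iter(page_iterator)
+        first_page = next(pages)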
+ """ + + def __init__( + self, + method, + input_token, + output_token, + more_results, + result_keys, + non_aggregate_keys, + limit_key, + max_items, + starting_token, + page_size, + op_kwargs, + ): + self._method = method + self._input_token = input_token + self._output_token = output_token + self._more_results = more_results + self._result_keys = result_keys + self._max_items = max_items + self._limit_key = limit_key + self._starting_token = starting_token + self._page_size = page_size + self._op_kwargs = op_kwargs + self._resume_token = None + self._non_aggregate_key_exprs = non_aggregate_keys + self._non_aggregate_part = {} + self._token_encoder = TokenEncoder() + self._token_decoder = TokenDecoder() + + @property + def result_keys(self): + return self._result_keys + + @property + def resume_token(self): + """Token to specify to resume pagination.""" + return self._resume_token + + @resume_token.setter + def resume_token(self, value): + if not isinstance(value, dict): + raise ValueError(f"Bad starting token: {value}") + + if 'boto_truncate_amount' in value: + token_keys = sorted(self._input_token + ['boto_truncate_amount']) + else: + token_keys = sorted(self._input_token) + dict_keys = sorted(value.keys()) + + if token_keys == dict_keys: + self._resume_token = self._token_encoder.encode(value) + else: + raise ValueError(f"Bad starting token: {value}") + + @property + def non_aggregate_part(self): + return self._non_aggregate_part + + def __iter__(self): + current_kwargs = self._op_kwargs + previous_next_token = None + next_token = {key: None for key in self._input_token} + if self._starting_token is not None: + # If the starting token exists, populate the next_token with the + # values inside it. This ensures that we have the service's + # pagination token on hand if we need to truncate after the + # first response. + next_token = self._parse_starting_token()[0] + # The number of items from result_key we've seen so far. + total_items = 0 + first_request = True + primary_result_key = self.result_keys[0] + starting_truncation = 0 + self._inject_starting_params(current_kwargs) + while True: + response = self._make_request(current_kwargs) + parsed = self._extract_parsed_response(response) + if first_request: + # The first request is handled differently. We could + # possibly have a resume/starting token that tells us where + # to index into the retrieved page. + if self._starting_token is not None: + starting_truncation = self._handle_first_request( + parsed, primary_result_key, starting_truncation + ) + first_request = False + self._record_non_aggregate_key_values(parsed) + else: + # If this isn't the first request, we have already sliced into + # the first request and had to make additional requests after. + # We no longer need to add this to truncation. 
+ starting_truncation = 0 + current_response = primary_result_key.search(parsed) + if current_response is None: + current_response = [] + num_current_response = len(current_response) + truncate_amount = 0 + if self._max_items is not None: + truncate_amount = ( + total_items + num_current_response - self._max_items + ) + if truncate_amount > 0: + self._truncate_response( + parsed, + primary_result_key, + truncate_amount, + starting_truncation, + next_token, + ) + yield response + break + else: + yield response + total_items += num_current_response + next_token = self._get_next_token(parsed) + if all(t is None for t in next_token.values()): + break + if ( + self._max_items is not None + and total_items == self._max_items + ): + # We're on a page boundary so we can set the current + # next token to be the resume token. + self.resume_token = next_token + break + if ( + previous_next_token is not None + and previous_next_token == next_token + ): + message = ( + f"The same next token was received twice: {next_token}" + ) + raise PaginationError(message=message) + self._inject_token_into_kwargs(current_kwargs, next_token) + previous_next_token = next_token + + def search(self, expression): + """Applies a JMESPath expression to a paginator + + Each page of results is searched using the provided JMESPath + expression. If the result is not a list, it is yielded + directly. If the result is a list, each element in the result + is yielded individually (essentially implementing a flatmap in + which the JMESPath search is the mapping function). + + :type expression: str + :param expression: JMESPath expression to apply to each page. + + :return: Returns an iterator that yields the individual + elements of applying a JMESPath expression to each page of + results. + """ + compiled = jmespath.compile(expression) + for page in self: + results = compiled.search(page) + if isinstance(results, list): + yield from results + else: + # Yield result directly if it is not a list. + yield results + + @with_current_context(partial(register_feature_id, 'PAGINATOR')) + def _make_request(self, current_kwargs): + return self._method(**current_kwargs) + + def _extract_parsed_response(self, response): + return response + + def _record_non_aggregate_key_values(self, response): + non_aggregate_keys = {} + for expression in self._non_aggregate_key_exprs: + result = expression.search(response) + set_value_from_jmespath( + non_aggregate_keys, expression.expression, result + ) + self._non_aggregate_part = non_aggregate_keys + + def _inject_starting_params(self, op_kwargs): + # If the user has specified a starting token we need to + # inject that into the operation's kwargs. + if self._starting_token is not None: + # Don't need to do anything special if there is no starting + # token specified. + next_token = self._parse_starting_token()[0] + self._inject_token_into_kwargs(op_kwargs, next_token) + if self._page_size is not None: + # Pass the page size as the parameter name for limiting + # page size, also known as the limit_key. + op_kwargs[self._limit_key] = self._page_size + + def _inject_token_into_kwargs(self, op_kwargs, next_token): + for name, token in next_token.items(): + if (token is not None) and (token != 'None'): + op_kwargs[name] = token + elif name in op_kwargs: + del op_kwargs[name] + + def _handle_first_request( + self, parsed, primary_result_key, starting_truncation + ): + # If the payload is an array or string, we need to slice into it + # and only return the truncated amount. 
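+        # For example, a deprecated starting token of "mytoken___2"
+        # means the first two items of this page were already returned
+        # on a previous run, so they are sliced off here.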
+        starting_truncation = self._parse_starting_token()[1]
+        all_data = primary_result_key.search(parsed)
+        if isinstance(all_data, (list, str)):
+            data = all_data[starting_truncation:]
+        else:
+            data = None
+        set_value_from_jmespath(parsed, primary_result_key.expression, data)
+        # We also need to truncate any secondary result keys
+        # because they were not truncated in the previously returned
+        # response.
+        for token in self.result_keys:
+            if token == primary_result_key:
+                continue
+            sample = token.search(parsed)
+            if isinstance(sample, list):
+                empty_value = []
+            elif isinstance(sample, str):
+                empty_value = ''
+            elif isinstance(sample, (int, float)):
+                # Even though we may be resuming from a truncated page, we
+                # still start from the actual numeric secondary result. For
+                # DynamoDB's Count/ScannedCount, this will still show how many
+                # items the server evaluated, even if the client is truncating
+                # due to a StartingToken.
+                empty_value = sample
+            else:
+                empty_value = None
+            set_value_from_jmespath(parsed, token.expression, empty_value)
+        return starting_truncation
+
+    def _truncate_response(
+        self,
+        parsed,
+        primary_result_key,
+        truncate_amount,
+        starting_truncation,
+        next_token,
+    ):
+        original = primary_result_key.search(parsed)
+        if original is None:
+            original = []
+        amount_to_keep = len(original) - truncate_amount
+        truncated = original[:amount_to_keep]
+        set_value_from_jmespath(
+            parsed, primary_result_key.expression, truncated
+        )
+        # The issue here is that even though we know how much we've truncated
+        # we need to account for this globally including any starting
+        # left truncation. For example:
+        # Raw response: [0,1,2,3]
+        # Starting index: 1
+        # Max items: 1
+        # Starting left truncation: [1, 2, 3]
+        # End right truncation for max items: [1]
+        # However, even though we only kept 1, this is post
+        # left truncation so the next starting index should be 2, not 1
+        # (left_truncation + amount_to_keep).
+        next_token['boto_truncate_amount'] = (
+            amount_to_keep + starting_truncation
+        )
+        self.resume_token = next_token
+
+    def _get_next_token(self, parsed):
+        if self._more_results is not None:
+            if not self._more_results.search(parsed):
+                return {}
+        next_tokens = {}
+        for output_token, input_key in zip(
+            self._output_token, self._input_token
+        ):
+            next_token = output_token.search(parsed)
+            # We do not want to include any empty strings as actual tokens.
+            # Treat them as None.
+            if next_token:
+                next_tokens[input_key] = next_token
+            else:
+                next_tokens[input_key] = None
+        return next_tokens
+
+    def result_key_iters(self):
+        teed_results = tee(self, len(self.result_keys))
+        return [
+            ResultKeyIterator(i, result_key)
+            for i, result_key in zip(teed_results, self.result_keys)
+        ]
+
+    def build_full_result(self):
+        complete_result = {}
+        for response in self:
+            page = response
+            # We want to try to catch operation object pagination
+            # and format correctly for those. They come in the form
+            # of a tuple of two elements: (http_response, parsed_response).
+            # We want the parsed_response as that is what the page iterator
+            # uses. We can remove it though once operation objects are removed.
+            if isinstance(response, tuple) and len(response) == 2:
+                page = response[1]
+            # We're incrementally building the full response page
+            # by page. For each page in the response we need to
+            # inject the necessary components from the page
+            # into the complete_result.
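+            # For example, two pages whose "Users" result key holds
+            # ['a'] and ['b'] respectively are aggregated below into
+            # {'Users': ['a', 'b']}.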
+ for result_expression in self.result_keys: + # In order to incrementally update a result key + # we need to search the existing value from complete_result, + # then we need to search the _current_ page for the + # current result key value. Then we append the current + # value onto the existing value, and re-set that value + # as the new value. + result_value = result_expression.search(page) + if result_value is None: + continue + existing_value = result_expression.search(complete_result) + if existing_value is None: + # Set the initial result + set_value_from_jmespath( + complete_result, + result_expression.expression, + result_value, + ) + continue + # Now both result_value and existing_value contain something + if isinstance(result_value, list): + existing_value.extend(result_value) + elif isinstance(result_value, (int, float, str)): + # Modify the existing result with the sum or concatenation + set_value_from_jmespath( + complete_result, + result_expression.expression, + existing_value + result_value, + ) + merge_dicts(complete_result, self.non_aggregate_part) + if self.resume_token is not None: + complete_result['NextToken'] = self.resume_token + return complete_result + + def _parse_starting_token(self): + if self._starting_token is None: + return None + + # The starting token is a dict passed as a base64 encoded string. + next_token = self._starting_token + try: + next_token = self._token_decoder.decode(next_token) + index = 0 + if 'boto_truncate_amount' in next_token: + index = next_token.get('boto_truncate_amount') + del next_token['boto_truncate_amount'] + except (ValueError, TypeError): + next_token, index = self._parse_starting_token_deprecated() + return next_token, index + + def _parse_starting_token_deprecated(self): + """ + This handles parsing of old style starting tokens, and attempts to + coerce them into the new style. + """ + log.debug( + "Attempting to fall back to old starting token parser. For token: %s", + self._starting_token, + ) + if self._starting_token is None: + return None + + parts = self._starting_token.split('___') + next_token = [] + index = 0 + if len(parts) == len(self._input_token) + 1: + try: + index = int(parts.pop()) + except ValueError: + # This doesn't look like a valid old-style token, so we're + # passing it along as an opaque service token. + parts = [self._starting_token] + + for part in parts: + if part == 'None': + next_token.append(None) + else: + next_token.append(part) + return self._convert_deprecated_starting_token(next_token), index + + def _convert_deprecated_starting_token(self, deprecated_token): + """ + This attempts to convert a deprecated starting token into the new + style. + """ + len_deprecated_token = len(deprecated_token) + len_input_token = len(self._input_token) + if len_deprecated_token > len_input_token: + raise ValueError(f"Bad starting token: {self._starting_token}") + elif len_deprecated_token < len_input_token: + log.debug( + "Old format starting token does not contain all input " + "tokens. Setting the rest, in order, as None." 
+ ) + for i in range(len_input_token - len_deprecated_token): + deprecated_token.append(None) + return dict(zip(self._input_token, deprecated_token)) + + +class Paginator: + PAGE_ITERATOR_CLS = PageIterator + + def __init__(self, method, pagination_config, model): + self._model = model + self._method = method + self._pagination_cfg = pagination_config + self._output_token = self._get_output_tokens(self._pagination_cfg) + self._input_token = self._get_input_tokens(self._pagination_cfg) + self._more_results = self._get_more_results_token(self._pagination_cfg) + self._non_aggregate_keys = self._get_non_aggregate_keys( + self._pagination_cfg + ) + self._result_keys = self._get_result_keys(self._pagination_cfg) + self._limit_key = self._get_limit_key(self._pagination_cfg) + + @property + def result_keys(self): + return self._result_keys + + def _get_non_aggregate_keys(self, config): + keys = [] + for key in config.get('non_aggregate_keys', []): + keys.append(jmespath.compile(key)) + return keys + + def _get_output_tokens(self, config): + output = [] + output_token = config['output_token'] + if not isinstance(output_token, list): + output_token = [output_token] + for config in output_token: + output.append(jmespath.compile(config)) + return output + + def _get_input_tokens(self, config): + input_token = self._pagination_cfg['input_token'] + if not isinstance(input_token, list): + input_token = [input_token] + return input_token + + def _get_more_results_token(self, config): + more_results = config.get('more_results') + if more_results is not None: + return jmespath.compile(more_results) + + def _get_result_keys(self, config): + result_key = config.get('result_key') + if result_key is not None: + if not isinstance(result_key, list): + result_key = [result_key] + result_key = [jmespath.compile(rk) for rk in result_key] + return result_key + + def _get_limit_key(self, config): + return config.get('limit_key') + + def paginate(self, **kwargs): + """Create paginator object for an operation. + + This returns an iterable object. Iterating over + this object will yield a single page of a response + at a time. + + """ + page_params = self._extract_paging_params(kwargs) + return self.PAGE_ITERATOR_CLS( + self._method, + self._input_token, + self._output_token, + self._more_results, + self._result_keys, + self._non_aggregate_keys, + self._limit_key, + page_params['MaxItems'], + page_params['StartingToken'], + page_params['PageSize'], + kwargs, + ) + + def _extract_paging_params(self, kwargs): + pagination_config = kwargs.pop('PaginationConfig', {}) + max_items = pagination_config.get('MaxItems', None) + if max_items is not None: + max_items = int(max_items) + page_size = pagination_config.get('PageSize', None) + if page_size is not None: + if self._limit_key is None: + raise PaginationError( + message="PageSize parameter is not supported for the " + "pagination interface for this operation." + ) + input_members = self._model.input_shape.members + limit_key_shape = input_members.get(self._limit_key) + if limit_key_shape.type_name == 'string': + if not isinstance(page_size, str): + page_size = str(page_size) + else: + page_size = int(page_size) + return { + 'MaxItems': max_items, + 'StartingToken': pagination_config.get('StartingToken', None), + 'PageSize': page_size, + } + + +class ResultKeyIterator: + """Iterates over the results of paginated responses. + + Each iterator is associated with a single result key. + Iterating over this object will give you each element in + the result key list. 
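+    For example, with a result key of ``Users``, iterating yields one
+    user at a time rather than one page at a time.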
+ + :param pages_iterator: An iterator that will give you + pages of results (a ``PageIterator`` class). + :param result_key: The JMESPath expression representing + the result key. + + """ + + def __init__(self, pages_iterator, result_key): + self._pages_iterator = pages_iterator + self.result_key = result_key + + def __iter__(self): + for page in self._pages_iterator: + results = self.result_key.search(page) + if results is None: + results = [] + yield from results diff --git a/py311/lib/python3.11/site-packages/botocore/parsers.py b/py311/lib/python3.11/site-packages/botocore/parsers.py new file mode 100644 index 0000000000000000000000000000000000000000..9f1e6049d1ff4d81e9c11f0014de6c044ba11a80 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/parsers.py @@ -0,0 +1,1484 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""Response parsers for the various protocol types. + +The module contains classes that can take an HTTP response, and given +an output shape, parse the response into a dict according to the +rules in the output shape. + +There are many similarities amongst the different protocols with regard +to response parsing, and the code is structured in a way to avoid +code duplication when possible. The diagram below is a diagram +showing the inheritance hierarchy of the response classes. + +:: + + + +-------------------+ + | ResponseParser | + +-------------------+ + ^ ^ ^ ^ ^ + | | | | | + | | | | +--------------------------------------------+ + | | | +-----------------------------+ | + | | | | | + +--------------------+ | +----------------+ | | + | | | | | ++----------+----------+ +------+-------+ +-------+------+ +------+-------+ +------+--------+ +|BaseXMLResponseParser| |BaseRestParser| |BaseJSONParser| |BaseCBORParser| |BaseRpcV2Parser| ++---------------------+ +--------------+ +--------------+ +----------+---+ +-+-------------+ + ^ ^ ^ ^ ^ ^ ^ ^ + | | | | | | | | + | | | | | | | | + | ++----------+-+ +-+--------+---+ | +---+---------+-+ + | |RestXMLParser| |RestJSONParser| | |RpcV2CBORParser| + +-----+-----+ +-------------+ +--------------+ | +---+---------+-+ + |QueryParser| | + +-----------+ +----+-----+ + |JSONParser| + +----------+ + +The diagram above shows that there is a base class, ``ResponseParser`` that +contains logic that is similar amongst all the different protocols (``query``, +``json``, ``rest-json``, ``rest-xml``, ``smithy-rpc-v2-cbor``). Amongst the various services +there is shared logic that can be grouped several ways: + +* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the + same way. +* The ``json`` and ``rest-json`` protocols both have JSON bodies that are + parsed in the same way. +* The ``rest-json`` and ``rest-xml`` protocols have additional attributes + besides body parameters that are parsed the same (headers, query string, + status code). + +This is reflected in the class diagram above. 
The ``BaseXMLResponseParser``
+and the ``BaseJSONParser`` contain logic for parsing the XML/JSON body,
+and the ``BaseRestParser`` contains logic for parsing out attributes that
+come from other parts of the HTTP response.  Classes like the
+``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
+XML body parsing logic and the ``BaseRestParser`` to get the HTTP
+header/status code/query string parsing.
+
+Additionally, there are event stream parsers that are used by the other parsers
+to wrap streaming bodies that represent a stream of events. The
+BaseEventStreamParser extends from ResponseParser and defines the logic for
+parsing values from the headers and payload of a message from the underlying
+binary encoding protocol. Currently, event streams support parsing bodies
+encoded as JSON and XML through the following hierarchy.
+
+
+                          +--------------+
+                          |ResponseParser|
+                          +--------------+
+                            ^    ^    ^
+       +--------------------+    |    +------------------+
+       |                         |                       |
+ +----------+----------+ +----------+----------+ +-------+------+
+ |BaseXMLResponseParser| |BaseEventStreamParser| |BaseJSONParser|
+ +---------------------+ +---------------------+ +--------------+
+            ^                 ^           ^              ^
+            |                 |           |              |
+            |                 |           |              |
+           +-+----------------+-+   +-+-----------------+-+
+           |EventStreamXMLParser|   |EventStreamJSONParser|
+           +--------------------+   +---------------------+
+
+Return Values
+=============
+
+Each call to ``parse()`` returns a dict that has this form::
+
+    Standard Response
+
+    {
+      "ResponseMetadata": {"RequestId": <requestid>}
+      <response keys>
+    }
+
+    Error response
+
+    {
+      "ResponseMetadata": {"RequestId": <requestid>}
+      "Error": {
+        "Code": <string>,
+        "Message": <string>,
+        "Type": <string>,
+        <additional keys>
+      }
+    }
+
+"""
+
+import base64
+import http.client
+import io
+import json
+import logging
+import os
+import re
+import struct
+
+from botocore.compat import ETree, XMLParseError
+from botocore.eventstream import EventStream, NoInitialResponseError
+from botocore.utils import (
+    CachedProperty,
+    ensure_boolean,
+    is_json_value_header,
+    lowercase_dict,
+    merge_dicts,
+    parse_timestamp,
+)
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TIMESTAMP_PARSER = parse_timestamp
+
+
+class ResponseParserFactory:
+    def __init__(self):
+        self._defaults = {}
+
+    def set_parser_defaults(self, **kwargs):
+        """Set default arguments when a parser instance is created.
+
+        You can specify any kwargs that are allowed by a ResponseParser
+        class.  There are currently two arguments:
+
+        * timestamp_parser - A callable that can parse a timestamp string
+        * blob_parser - A callable that can parse a blob type
+
+        """
+        self._defaults.update(kwargs)
+
+    def create_parser(self, protocol_name):
+        parser_cls = PROTOCOL_PARSERS[protocol_name]
+        return parser_cls(**self._defaults)
+
+
+def create_parser(protocol):
+    return ResponseParserFactory().create_parser(protocol)
+
+
+def _text_content(func):
+    # This decorator hides the difference between
+    # an XML node with text or a plain string.  It's used
+    # to ensure that scalar processing operates only on text
+    # strings, which allows the same scalar handlers to be used
+    # for XML nodes from the body and HTTP headers.
+    def _get_text_content(self, shape, node_or_string):
+        if hasattr(node_or_string, 'text'):
+            text = node_or_string.text
+            if text is None:
+                # If an XML node is empty <foo></foo>,
+                # we want to parse that as an empty string,
+                # not as a null/None value.
+ text = '' + else: + text = node_or_string + return func(self, shape, text) + + return _get_text_content + + +class ResponseParserError(Exception): + pass + + +class ResponseParser: + """Base class for response parsing. + + This class represents the interface that all ResponseParsers for the + various protocols must implement. + + This class will take an HTTP response and a model shape and parse the + HTTP response into a dictionary. + + There is a single public method exposed: ``parse``. See the ``parse`` + docstring for more info. + + """ + + DEFAULT_ENCODING = 'utf-8' + EVENT_STREAM_PARSER_CLS = None + # This is a list of known values for the 'location' key in the + # serialization dict. The location key tells us where in the response + # to parse the value. Members with locations that aren't in this list + # will be parsed from the body. + KNOWN_LOCATIONS = ('header', 'headers', 'statusCode') + + def __init__(self, timestamp_parser=None, blob_parser=None): + if timestamp_parser is None: + timestamp_parser = DEFAULT_TIMESTAMP_PARSER + self._timestamp_parser = timestamp_parser + if blob_parser is None: + blob_parser = self._default_blob_parser + self._blob_parser = blob_parser + self._event_stream_parser = None + if self.EVENT_STREAM_PARSER_CLS is not None: + self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS( + timestamp_parser, blob_parser + ) + + def _default_blob_parser(self, value): + # Blobs are always returned as bytes type (this matters on python3). + # We don't decode this to a str because it's entirely possible that the + # blob contains binary data that actually can't be decoded. + return base64.b64decode(value) + + def parse(self, response, shape): + """Parse the HTTP response given a shape. + + :param response: The HTTP response dictionary. This is a dictionary + that represents the HTTP request. The dictionary must have the + following keys, ``body``, ``headers``, and ``status_code``. + + :param shape: The model shape describing the expected output. + :return: Returns a dictionary representing the parsed response + described by the model. In addition to the shape described from + the model, each response will also have a ``ResponseMetadata`` + which contains metadata about the response, which contains at least + two keys containing ``RequestId`` and ``HTTPStatusCode``. Some + responses may populate additional keys, but ``RequestId`` will + always be present. + + """ + LOG.debug('Response headers: %r', response['headers']) + LOG.debug('Response body:\n%r', response['body']) + if response['status_code'] >= 301: + if self._is_generic_error_response(response): + parsed = self._do_generic_error_parse(response) + elif self._is_modeled_error_shape(shape): + parsed = self._do_modeled_error_parse(response, shape) + # We don't want to decorate the modeled fields with metadata + return parsed + else: + parsed = self._do_error_parse(response, shape) + else: + parsed = self._do_parse(response, shape) + + # We don't want to decorate event stream responses with metadata + if shape and shape.serialization.get('eventstream'): + return parsed + + # Add ResponseMetadata if it doesn't exist and inject the HTTP + # status code and headers from the response. + if isinstance(parsed, dict): + response_metadata = parsed.get('ResponseMetadata', {}) + response_metadata['HTTPStatusCode'] = response['status_code'] + # Ensure that the http header keys are all lower cased. Older + # versions of urllib3 (< 1.11) would unintentionally do this for us + # (see urllib3#633). 
We need to do this conversion manually now.
+            headers = response['headers']
+            response_metadata['HTTPHeaders'] = lowercase_dict(headers)
+            parsed['ResponseMetadata'] = response_metadata
+            self._add_checksum_response_metadata(response, response_metadata)
+        return parsed
+
+    def _add_checksum_response_metadata(self, response, response_metadata):
+        checksum_context = response.get('context', {}).get('checksum', {})
+        algorithm = checksum_context.get('response_algorithm')
+        if algorithm:
+            response_metadata['ChecksumAlgorithm'] = algorithm
+
+    def _is_modeled_error_shape(self, shape):
+        return shape is not None and shape.metadata.get('exception', False)
+
+    def _is_generic_error_response(self, response):
+        # There are times when a service will respond with a generic
+        # error response such as:
+        # '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
+        #
+        # This can also happen if you're going through a proxy.
+        # In this case the protocol specific _do_error_parse will either
+        # fail to parse the response (in the best case) or silently succeed
+        # and treat the HTML above as an XML response and return
+        # nonsensical parsed data.
+        # To prevent this case from happening we first need to check
+        # whether or not this response looks like the generic response.
+        if response['status_code'] >= 500:
+            if 'body' not in response or response['body'] is None:
+                return True
+
+            body = response['body'].strip()
+            return body.startswith(b'<html>') or not body
+
+    def _do_generic_error_parse(self, response):
+        # There's not really much we can do when we get a generic
+        # html response.
+        LOG.debug(
+            "Received a non protocol specific error response from the "
+            "service, unable to populate error code and message."
+        )
+        return {
+            'Error': {
+                'Code': str(response['status_code']),
+                'Message': http.client.responses.get(
+                    response['status_code'], ''
+                ),
+            },
+            'ResponseMetadata': {},
+        }
+
+    def _do_parse(self, response, shape):
+        raise NotImplementedError(f"{self.__class__.__name__}._do_parse")
+
+    def _do_error_parse(self, response, shape):
+        raise NotImplementedError(f"{self.__class__.__name__}._do_error_parse")
+
+    def _do_modeled_error_parse(self, response, shape, parsed):
+        raise NotImplementedError(
+            f"{self.__class__.__name__}._do_modeled_error_parse"
+        )
+
+    def _parse_shape(self, shape, node):
+        handler = getattr(
+            self, f'_handle_{shape.type_name}', self._default_handle
+        )
+        return handler(shape, node)
+
+    def _handle_list(self, shape, node):
+        # Enough implementations share list serialization that it's moved
+        # up here in the base class.
+        parsed = []
+        member_shape = shape.member
+        for item in node:
+            parsed.append(self._parse_shape(member_shape, item))
+        return parsed
+
+    def _default_handle(self, shape, value):
+        return value
+
+    def _create_event_stream(self, response, shape):
+        parser = self._event_stream_parser
+        name = response['context'].get('operation_name')
+        return EventStream(response['body'], shape, parser, name)
+
+    def _get_first_key(self, value):
+        return list(value)[0]
+
+    def _has_unknown_tagged_union_member(self, shape, value):
+        if shape.is_tagged_union:
+            cleaned_value = value.copy()
+            cleaned_value.pop("__type", None)
+            cleaned_value = {
+                k: v for k, v in cleaned_value.items() if v is not None
+            }
+            if len(cleaned_value) != 1:
+                error_msg = (
+                    "Invalid service response: %s must have one and only "
+                    "one member set."
+                )
+                raise ResponseParserError(error_msg % shape.name)
+            tag = self._get_first_key(cleaned_value)
+            serialized_member_names = [
+                shape.members[member].serialization.get('name', member)
+                for member in shape.members
+            ]
+            if tag not in serialized_member_names:
+                LOG.info(
+                    "Received a tagged union response with member unknown to client: %s. "
+                    "Please upgrade SDK for full response support.",
+                    tag,
+                )
+                return True
+        return False
+
+    def _handle_unknown_tagged_union_member(self, tag):
+        return {'SDK_UNKNOWN_MEMBER': {'name': tag}}
+
+    def _do_query_compatible_error_parse(self, code, headers, error):
+        """
+        Error response may contain an x-amzn-query-error header to translate
+        error codes from former `query` services into other protocols. We use
+        this to do our lookup in the errorfactory for modeled errors.
+        """
+        query_error = headers['x-amzn-query-error']
+        query_error_components = query_error.split(';')
+
+        if len(query_error_components) == 2 and query_error_components[0]:
+            error['Error']['QueryErrorCode'] = code
+            error['Error']['Type'] = query_error_components[1]
+            return query_error_components[0]
+        return code
+
+
+class BaseXMLResponseParser(ResponseParser):
+    def __init__(self, timestamp_parser=None, blob_parser=None):
+        super().__init__(timestamp_parser, blob_parser)
+        self._namespace_re = re.compile('{.*}')
+
+    def _handle_map(self, shape, node):
+        parsed = {}
+        key_shape = shape.key
+        value_shape = shape.value
+        key_location_name = key_shape.serialization.get('name') or 'key'
+        value_location_name = value_shape.serialization.get('name') or 'value'
+        if shape.serialization.get('flattened') and not isinstance(node, list):
+            node = [node]
+        for keyval_node in node:
+            for single_pair in keyval_node:
+                # Within each <entry> there's a <key> and a <value>
+                tag_name = self._node_tag(single_pair)
+                if tag_name == key_location_name:
+                    key_name = self._parse_shape(key_shape, single_pair)
+                elif tag_name == value_location_name:
+                    val_name = self._parse_shape(value_shape, single_pair)
+                else:
+                    raise ResponseParserError(f"Unknown tag: {tag_name}")
+            parsed[key_name] = val_name
+        return parsed
+
+    def _node_tag(self, node):
+        return self._namespace_re.sub('', node.tag)
+
+    def _handle_list(self, shape, node):
+        # When we use _build_name_to_xml_node, repeated elements are aggregated
+        # into a list.  However, we can't tell the difference between a scalar
+        # value and a single element flattened list.  So before calling the
+        # real _handle_list, we know that "node" should actually be a list if
+        # it's flattened, and if it's not, then we make it a one element list.
+        if shape.serialization.get('flattened') and not isinstance(node, list):
+            node = [node]
+        return super()._handle_list(shape, node)
+
+    def _handle_structure(self, shape, node):
+        parsed = {}
+        members = shape.members
+        if shape.metadata.get('exception', False):
+            node = self._get_error_root(node)
+        xml_dict = self._build_name_to_xml_node(node)
+        if self._has_unknown_tagged_union_member(shape, xml_dict):
+            tag = self._get_first_key(xml_dict)
+            return self._handle_unknown_tagged_union_member(tag)
+        for member_name in members:
+            member_shape = members[member_name]
+            location = member_shape.serialization.get('location')
+            if (
+                location in self.KNOWN_LOCATIONS
+                or member_shape.serialization.get('eventheader')
+            ):
+                # All members with known locations have already been handled,
+                # so we don't need to parse these members.
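+                # (For example, a member whose location is 'header' is
+                # parsed from the HTTP headers by the rest parsers, not
+                # from the XML body.)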
+                continue
+            xml_name = self._member_key_name(member_shape, member_name)
+            member_node = xml_dict.get(xml_name)
+            if member_node is not None:
+                parsed[member_name] = self._parse_shape(
+                    member_shape, member_node
+                )
+            elif member_shape.serialization.get('xmlAttribute'):
+                attribs = {}
+                location_name = member_shape.serialization['name']
+                for key, value in node.attrib.items():
+                    new_key = self._namespace_re.sub(
+                        location_name.split(':')[0] + ':', key
+                    )
+                    attribs[new_key] = value
+                if location_name in attribs:
+                    parsed[member_name] = attribs[location_name]
+        return parsed
+
+    def _get_error_root(self, original_root):
+        if self._node_tag(original_root) == 'ErrorResponse':
+            for child in original_root:
+                if self._node_tag(child) == 'Error':
+                    return child
+        return original_root
+
+    def _member_key_name(self, shape, member_name):
+        # This method is needed because we have to special case flattened
+        # lists with a serialization name. If this is the case we use the
+        # locationName from the list's member shape as the key name for the
+        # surrounding structure.
+        if shape.type_name == 'list' and shape.serialization.get('flattened'):
+            list_member_serialized_name = shape.member.serialization.get(
+                'name'
+            )
+            if list_member_serialized_name is not None:
+                return list_member_serialized_name
+        serialized_name = shape.serialization.get('name')
+        if serialized_name is not None:
+            return serialized_name
+        return member_name
+
+    def _build_name_to_xml_node(self, parent_node):
+        # If the parent node is actually a list, we should not be trying
+        # to serialize it to a dictionary. Instead, return the first element
+        # in the list.
+        if isinstance(parent_node, list):
+            return self._build_name_to_xml_node(parent_node[0])
+        xml_dict = {}
+        for item in parent_node:
+            key = self._node_tag(item)
+            if key in xml_dict:
+                # If the key already exists, the most natural
+                # way to handle this is to aggregate repeated
+                # keys into a single list.
+                # <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
+                if isinstance(xml_dict[key], list):
+                    xml_dict[key].append(item)
+                else:
+                    # Convert from a scalar to a list.
+                    xml_dict[key] = [xml_dict[key], item]
+            else:
+                xml_dict[key] = item
+        return xml_dict
+
+    def _parse_xml_string_to_dom(self, xml_string):
+        try:
+            parser = ETree.XMLParser(
+                target=ETree.TreeBuilder(), encoding=self.DEFAULT_ENCODING
+            )
+            parser.feed(xml_string)
+            root = parser.close()
+        except XMLParseError as e:
+            raise ResponseParserError(
+                f"Unable to parse response ({e}), "
+                f"invalid XML received. Further retries may succeed:\n"
+                f"{xml_string}"
+            )
+        return root
+
+    def _replace_nodes(self, parsed):
+        for key, value in parsed.items():
+            if list(value):
+                sub_dict = self._build_name_to_xml_node(value)
+                parsed[key] = self._replace_nodes(sub_dict)
+            else:
+                parsed[key] = value.text
+        return parsed
+
+    @_text_content
+    def _handle_boolean(self, shape, text):
+        if text == 'true':
+            return True
+        else:
+            return False
+
+    @_text_content
+    def _handle_float(self, shape, text):
+        return float(text)
+
+    @_text_content
+    def _handle_timestamp(self, shape, text):
+        return self._timestamp_parser(text)
+
+    @_text_content
+    def _handle_integer(self, shape, text):
+        return int(text)
+
+    @_text_content
+    def _handle_string(self, shape, text):
+        return text
+
+    @_text_content
+    def _handle_blob(self, shape, text):
+        return self._blob_parser(text)
+
+    _handle_character = _handle_string
+    _handle_double = _handle_float
+    _handle_long = _handle_integer
+
+
+class QueryParser(BaseXMLResponseParser):
+    def _do_error_parse(self, response, shape):
+        xml_contents = response['body']
+        root = self._parse_xml_string_to_dom(xml_contents)
+        parsed = self._build_name_to_xml_node(root)
+        self._replace_nodes(parsed)
+        # Once we've converted xml->dict, we need to make one or two
+        # more adjustments to extract nested errors and to be consistent
+        # with ResponseMetadata for non-error responses:
+        # 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
+        # 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
+        if 'Errors' in parsed:
+            parsed.update(parsed.pop('Errors'))
+        if 'RequestId' in parsed:
+            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
+        return parsed
+
+    def _do_modeled_error_parse(self, response, shape):
+        return self._parse_body_as_xml(response, shape, inject_metadata=False)
+
+    def _do_parse(self, response, shape):
+        return self._parse_body_as_xml(response, shape, inject_metadata=True)
+
+    def _parse_body_as_xml(self, response, shape, inject_metadata=True):
+        xml_contents = response['body']
+        root = self._parse_xml_string_to_dom(xml_contents)
+        parsed = {}
+        if shape is not None:
+            start = root
+            if 'resultWrapper' in shape.serialization:
+                start = self._find_result_wrapped_shape(
+                    shape.serialization['resultWrapper'], root
+                )
+            parsed = self._parse_shape(shape, start)
+        if inject_metadata:
+            self._inject_response_metadata(root, parsed)
+        return parsed
+
+    def _find_result_wrapped_shape(self, element_name, xml_root_node):
+        mapping = self._build_name_to_xml_node(xml_root_node)
+        return mapping[element_name]
+
+    def _inject_response_metadata(self, node, inject_into):
+        mapping = self._build_name_to_xml_node(node)
+        child_node = mapping.get('ResponseMetadata')
+        if child_node is not None:
+            sub_mapping = self._build_name_to_xml_node(child_node)
+            for key, value in sub_mapping.items():
+                sub_mapping[key] = value.text
+            inject_into['ResponseMetadata'] = sub_mapping
+
+
+class EC2QueryParser(QueryParser):
+    def _inject_response_metadata(self, node, inject_into):
+        mapping = self._build_name_to_xml_node(node)
+        child_node = mapping.get('requestId')
+        if child_node is not None:
+            inject_into['ResponseMetadata'] = {'RequestId': child_node.text}
+
+    def _do_error_parse(self, response, shape):
+        # EC2 errors look like:
+        # <Response>
+        #     <Errors>
+        #         <Error>
+        #             <Code>InvalidInstanceID.Malformed</Code>
+        #             <Message>Invalid id: "1343124"</Message>
+        #         </Error>
+        #     </Errors>
+        #     <RequestID>12345</RequestID>
+        # </Response>
+        # This is different from QueryParser in that it's RequestID,
+        # not RequestId
+        original = super()._do_error_parse(response, shape)
+        if 'RequestID' in original:
+            original['ResponseMetadata'] = {
+                'RequestId': original.pop('RequestID')
+            }
+        return original
+
+    def _get_error_root(self, original_root):
+        for child in original_root:
+            if self._node_tag(child) == 'Errors':
+                for errors_child in child:
+                    if self._node_tag(errors_child) == 'Error':
+                        return errors_child
+        return original_root
+
+
+class BaseJSONParser(ResponseParser):
+    def _handle_structure(self, shape, value):
+        final_parsed = {}
+        if shape.is_document_type:
+            final_parsed = value
+        else:
+            member_shapes = shape.members
+            if value is None:
+                # If the value comes across the wire as "null" (None in
+                # Python), we should be returning this unchanged, instead
+                # of as an empty dict.
+                return None
+            final_parsed = {}
+            if self._has_unknown_tagged_union_member(shape, value):
+                tag = self._get_first_key(value)
+                return self._handle_unknown_tagged_union_member(tag)
+            for member_name in member_shapes:
+                member_shape = member_shapes[member_name]
+                json_name = member_shape.serialization.get('name', member_name)
+                raw_value = value.get(json_name)
+                if raw_value is not None:
+                    final_parsed[member_name] = self._parse_shape(
+                        member_shapes[member_name], raw_value
+                    )
+        return final_parsed
+
+    def _handle_map(self, shape, value):
+        parsed = {}
+        key_shape = shape.key
+        value_shape = shape.value
+        for key, value in value.items():
+            actual_key = self._parse_shape(key_shape, key)
+            actual_value = self._parse_shape(value_shape, value)
+            parsed[actual_key] = actual_value
+        return parsed
+
+    def _handle_blob(self, shape, value):
+        return self._blob_parser(value)
+
+    def _handle_timestamp(self, shape, value):
+        return self._timestamp_parser(value)
+
+    def _do_error_parse(self, response, shape):
+        body = self._parse_body_as_json(response['body'])
+        error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
+        headers = response['headers']
+        # Error responses can have slightly different structures for json.
+        # The basic structure is:
+        #
+        # {"__type":"ConnectClientException",
+        #  "message":"The error message."}
+
+        # The error message can either come in the 'message' or 'Message' key
+        # so we need to check for both.
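+        # (Illustrative examples, not part of the original source; both of
+        # these wire forms yield the same parsed message:
+        #     {"__type": "ValidationException", "message": "Invalid input"}
+        #     {"__type": "ValidationException", "Message": "Invalid input"})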
+        error['Error']['Message'] = body.get(
+            'message', body.get('Message', '')
+        )
+        # if the message did not contain an error code
+        # include the response status code
+        response_code = response.get('status_code')
+
+        code = body.get('__type', response_code and str(response_code))
+        if code is not None:
+            # code has a couple of forms as well:
+            # * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
+            # * "ResourceNotFoundException"
+            if ':' in code:
+                code = code.split(':', 1)[0]
+            if '#' in code:
+                code = code.rsplit('#', 1)[1]
+            if 'x-amzn-query-error' in headers:
+                code = self._do_query_compatible_error_parse(
+                    code, headers, error
+                )
+            error['Error']['Code'] = code
+        self._inject_response_metadata(error, response['headers'])
+        return error
+
+    def _inject_response_metadata(self, parsed, headers):
+        if 'x-amzn-requestid' in headers:
+            parsed.setdefault('ResponseMetadata', {})['RequestId'] = headers[
+                'x-amzn-requestid'
+            ]
+
+    def _parse_body_as_json(self, body_contents):
+        if not body_contents:
+            return {}
+        body = body_contents.decode(self.DEFAULT_ENCODING)
+        try:
+            original_parsed = json.loads(body)
+            return original_parsed
+        except ValueError:
+            # if the body cannot be parsed, include
+            # the literal string as the message
+            return {'message': body}
+
+
+class BaseCBORParser(ResponseParser):
+    INDEFINITE_ITEM_ADDITIONAL_INFO = 31
+    BREAK_CODE = 0xFF
+
+    @CachedProperty
+    def major_type_to_parsing_method_map(self):
+        return {
+            0: self._parse_unsigned_integer,
+            1: self._parse_negative_integer,
+            2: self._parse_byte_string,
+            3: self._parse_text_string,
+            4: self._parse_array,
+            5: self._parse_map,
+            6: self._parse_tag,
+            7: self._parse_simple_and_float,
+        }
+
+    def get_peekable_stream_from_bytes(self, bytes):
+        return io.BufferedReader(io.BytesIO(bytes))
+
+    def parse_data_item(self, stream):
+        # CBOR data is divided into "data items", and each data item starts
+        # with an initial byte that describes how the following bytes should
+        # be parsed
+        initial_byte = self._read_bytes_as_int(stream, 1)
+        # The highest order three bits of the initial byte describe the CBOR
+        # major type
+        major_type = initial_byte >> 5
+        # The lowest order five bits of the initial byte give additional
+        # information about how the following bytes should be parsed
+        additional_info = initial_byte & 0b00011111
+
+        if major_type in self.major_type_to_parsing_method_map:
+            method = self.major_type_to_parsing_method_map[major_type]
+            return method(stream, additional_info)
+        else:
+            raise ResponseParserError(
+                f"Unsupported initial byte found for data item - "
+                f"Major type: {major_type}, Additional info: "
+                f"{additional_info}"
+            )
+
+    # Major type 0 - unsigned integers
+    def _parse_unsigned_integer(self, stream, additional_info):
+        additional_info_to_num_bytes = {
+            24: 1,
+            25: 2,
+            26: 4,
+            27: 8,
+        }
+        # Values under 24 don't need a full byte to be stored; their values
+        # are instead stored as the "additional info" in the initial byte
+        if additional_info < 24:
+            return additional_info
+        elif additional_info in additional_info_to_num_bytes:
+            num_bytes = additional_info_to_num_bytes[additional_info]
+            return self._read_bytes_as_int(stream, num_bytes)
+        else:
+            raise ResponseParserError(
+                "Invalid CBOR integer returned from the service; unparsable "
+                f"additional info found for major type 0 or 1: "
+                f"{additional_info}"
+            )
+
+    # Major type 1 - negative integers
+    def _parse_negative_integer(self, stream, additional_info):
+        return -1 - self._parse_unsigned_integer(stream, additional_info)
+
+    # Major type 2 - byte string
+    def _parse_byte_string(self, stream, additional_info):
+        if additional_info != self.INDEFINITE_ITEM_ADDITIONAL_INFO:
+            length = self._parse_unsigned_integer(stream, additional_info)
+            return self._read_from_stream(stream, length)
+        else:
+            chunks = []
+            while True:
+                if self._handle_break_code(stream):
+                    break
+                initial_byte = self._read_bytes_as_int(stream, 1)
+                additional_info = initial_byte & 0b00011111
+                length = self._parse_unsigned_integer(stream, additional_info)
+                chunks.append(self._read_from_stream(stream, length))
+            return b''.join(chunks)
+
+    # Major type 3 - text string
+    def _parse_text_string(self, stream, additional_info):
+        return self._parse_byte_string(stream, additional_info).decode('utf-8')
+
+    # Major type 4 - lists
+    def _parse_array(self, stream, additional_info):
+        if additional_info != self.INDEFINITE_ITEM_ADDITIONAL_INFO:
+            length = self._parse_unsigned_integer(stream, additional_info)
+            return [self.parse_data_item(stream) for _ in range(length)]
+        else:
+            items = []
+            while not self._handle_break_code(stream):
+                items.append(self.parse_data_item(stream))
+            return items
+
+    # Major type 5 - maps
+    def _parse_map(self, stream, additional_info):
+        items = {}
+        if additional_info != self.INDEFINITE_ITEM_ADDITIONAL_INFO:
+            length = self._parse_unsigned_integer(stream, additional_info)
+            for _ in range(length):
+                self._parse_key_value_pair(stream, items)
+            return items
+        else:
+            while not self._handle_break_code(stream):
+                self._parse_key_value_pair(stream, items)
+            return items
+
+    def _parse_key_value_pair(self, stream, items):
+        key = self.parse_data_item(stream)
+        value = self.parse_data_item(stream)
+        if value is not None:
+            items[key] = value
+
+    # Major type 6 is tags. The only tag we currently support is tag 1 for
+    # Unix timestamps
+    def _parse_tag(self, stream, additional_info):
+        tag = self._parse_unsigned_integer(stream, additional_info)
+        value = self.parse_data_item(stream)
+        if tag == 1:  # Epoch-based date/time (CBOR tag 1)
+            return self._parse_datetime(value)
+        else:
+            raise ResponseParserError(
+                f"Found CBOR tag not supported by botocore: {tag}"
+            )
+
+    def _parse_datetime(self, value):
+        if isinstance(value, (int, float)):
+            return self._timestamp_parser(value)
+        else:
+            raise ResponseParserError(
+                f"Unable to parse datetime value: {value}"
+            )
+
+    # Major type 7 includes floats and "simple" types. Supported simple types
+    # are currently boolean values, CBOR's null, and CBOR's undefined type.
+    # All other values are either floats or invalid.
+    def _parse_simple_and_float(self, stream, additional_info):
+        # For major type 7, values 20-23 correspond to CBOR "simple" values
+        additional_info_simple_values = {
+            20: False,  # CBOR false
+            21: True,  # CBOR true
+            22: None,  # CBOR null
+            23: None,  # CBOR undefined
+        }
+        # First we check if the additional info corresponds to a supported
+        # simple value
+        if additional_info in additional_info_simple_values:
+            return additional_info_simple_values[additional_info]
+
+        # If it's not a simple value, we need to parse it into the correct
+        # format and number of bytes
+        float_formats = {
+            25: ('>e', 2),
+            26: ('>f', 4),
+            27: ('>d', 8),
+        }
+
+        if additional_info in float_formats:
+            float_format, num_bytes = float_formats[additional_info]
+            return struct.unpack(
+                float_format, self._read_from_stream(stream, num_bytes)
+            )[0]
+        raise ResponseParserError(
+            f"Invalid additional info found for major type 7: "
+            f"{additional_info}. This indicates an unsupported simple type "
+            f"or an indefinite float value"
+        )
+
+    # This helper method is intended for use when parsing indefinite length
+    # items. It does nothing if the next byte is not the break code. If the
+    # next byte is the break code, it advances past that byte and returns
+    # True so the calling method knows to stop parsing that data item.
+    def _handle_break_code(self, stream):
+        if int.from_bytes(stream.peek(1)[:1], 'big') == self.BREAK_CODE:
+            stream.seek(1, os.SEEK_CUR)
+            return True
+
+    def _read_bytes_as_int(self, stream, num_bytes):
+        byte = self._read_from_stream(stream, num_bytes)
+        return int.from_bytes(byte, 'big')
+
+    def _read_from_stream(self, stream, num_bytes):
+        value = stream.read(num_bytes)
+        if len(value) != num_bytes:
+            raise ResponseParserError(
+                "End of stream reached; this indicates a "
+                "malformed CBOR response from the server or an "
+                "issue in botocore"
+            )
+        return value
+
+
+class BaseEventStreamParser(ResponseParser):
+    def _do_parse(self, response, shape):
+        final_parsed = {}
+        if shape.serialization.get('eventstream'):
+            event_type = response['headers'].get(':event-type')
+            event_shape = shape.members.get(event_type)
+            if event_shape:
+                final_parsed[event_type] = self._do_parse(
+                    response, event_shape
+                )
+        else:
+            self._parse_non_payload_attrs(
+                response, shape, shape.members, final_parsed
+            )
+            self._parse_payload(response, shape, shape.members, final_parsed)
+        return final_parsed
+
+    def _do_error_parse(self, response, shape):
+        exception_type = response['headers'].get(':exception-type')
+        exception_shape = shape.members.get(exception_type)
+        if exception_shape is not None:
+            original_parsed = self._initial_body_parse(response['body'])
+            body = self._parse_shape(exception_shape, original_parsed)
+            error = {
+                'Error': {
+                    'Code': exception_type,
+                    'Message': body.get('Message', body.get('message', '')),
+                }
+            }
+        else:
+            error = {
+                'Error': {
+                    'Code': response['headers'].get(':error-code', ''),
+                    'Message': response['headers'].get(':error-message', ''),
+                }
+            }
+        return error
+
+    def _parse_payload(self, response, shape, member_shapes, final_parsed):
+        if shape.serialization.get('event'):
+            for name in member_shapes:
+                member_shape = member_shapes[name]
+                if member_shape.serialization.get('eventpayload'):
+                    body = response['body']
+                    if member_shape.type_name == 'blob':
+                        parsed_body = body
+                    elif member_shape.type_name == 'string':
+                        parsed_body = body.decode(self.DEFAULT_ENCODING)
+                    else:
+                        raw_parse = self._initial_body_parse(body)
+                        parsed_body = self._parse_shape(
+                            member_shape, raw_parse
+                        )
+                    final_parsed[name] = parsed_body
+                    return
+            # If we didn't find an explicit payload, use the current shape
+            original_parsed = self._initial_body_parse(response['body'])
+            body_parsed = self._parse_shape(shape, original_parsed)
+            final_parsed.update(body_parsed)
+
+    def _parse_non_payload_attrs(
+        self, response, shape, member_shapes, final_parsed
+    ):
+        headers = response['headers']
+        for name in member_shapes:
+            member_shape = member_shapes[name]
+            if member_shape.serialization.get('eventheader'):
+                if name in headers:
+                    value = headers[name]
+                    if member_shape.type_name == 'timestamp':
+                        # Event stream timestamps are in milliseconds, so we
+                        # divide by 1000 to convert to seconds.
+                        value = self._timestamp_parser(value / 1000.0)
+                    final_parsed[name] = value
+
+    def _initial_body_parse(self, body_contents):
+        # This method should do the initial xml/json parsing of the
+        # body. We still need to walk the parsed body in order
+        # to convert types, but this method will do the first round
+        # of parsing.
+        raise NotImplementedError("_initial_body_parse")
+
+
+class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser):
+    def _initial_body_parse(self, body_contents):
+        return self._parse_body_as_json(body_contents)
+
+
+class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser):
+    def _initial_body_parse(self, xml_string):
+        if not xml_string:
+            return ETree.Element('')
+        return self._parse_xml_string_to_dom(xml_string)
+
+
+class EventStreamCBORParser(BaseEventStreamParser, BaseCBORParser):
+    def _initial_body_parse(self, body_contents):
+        if body_contents == b'':
+            return {}
+        return self.parse_data_item(
+            self.get_peekable_stream_from_bytes(body_contents)
+        )
+
+
+class JSONParser(BaseJSONParser):
+    """Response parser for the "json" protocol."""
+
+    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
+
+    def _do_parse(self, response, shape):
+        parsed = {}
+        if shape is not None:
+            event_name = shape.event_stream_name
+            if event_name:
+                parsed = self._handle_event_stream(response, shape, event_name)
+            else:
+                parsed = self._handle_json_body(response['body'], shape)
+        self._inject_response_metadata(parsed, response['headers'])
+        return parsed
+
+    def _do_modeled_error_parse(self, response, shape):
+        return self._handle_json_body(response['body'], shape)
+
+    def _handle_event_stream(self, response, shape, event_name):
+        event_stream_shape = shape.members[event_name]
+        event_stream = self._create_event_stream(response, event_stream_shape)
+        try:
+            event = event_stream.get_initial_response()
+        except NoInitialResponseError:
+            error_msg = 'First event was not of type initial-response'
+            raise ResponseParserError(error_msg)
+        parsed = self._handle_json_body(event.payload, shape)
+        parsed[event_name] = event_stream
+        return parsed
+
+    def _handle_json_body(self, raw_body, shape):
+        # The json.loads() gives us the primitive JSON types, but we need
+        # to traverse the parsed JSON data to convert to richer types
+        # (blobs, timestamps, etc.)
+        parsed_json = self._parse_body_as_json(raw_body)
+        return self._parse_shape(shape, parsed_json)
+
+
+class BaseRestParser(ResponseParser):
+    def _do_parse(self, response, shape):
+        final_parsed = {}
+        final_parsed['ResponseMetadata'] = self._populate_response_metadata(
+            response
+        )
+        self._add_modeled_parse(response, shape, final_parsed)
+        return final_parsed
+
+    def _add_modeled_parse(self, response, shape, final_parsed):
+        if shape is None:
+            return final_parsed
+        member_shapes = shape.members
+        self._parse_non_payload_attrs(
+            response, shape, member_shapes, final_parsed
+        )
+        self._parse_payload(response, shape, member_shapes, final_parsed)
+
+    def _do_modeled_error_parse(self, response, shape):
+        final_parsed = {}
+        self._add_modeled_parse(response, shape, final_parsed)
+        return final_parsed
+
+    def _populate_response_metadata(self, response):
+        metadata = {}
+        headers = response['headers']
+        if 'x-amzn-requestid' in headers:
+            metadata['RequestId'] = headers['x-amzn-requestid']
+        elif 'x-amz-request-id' in headers:
+            metadata['RequestId'] = headers['x-amz-request-id']
+        # HostId is what it's called whenever this value is returned
+        # in an XML response body, so to be consistent, we'll always
+        # call it HostId.
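+        # (Illustrative note, not part of the original source: 'x-amz-id-2'
+        # is the extended request ID that S3 returns alongside
+        # 'x-amz-request-id'; services that do not send it get an empty
+        # string here.)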
+        metadata['HostId'] = headers.get('x-amz-id-2', '')
+        return metadata
+
+    def _parse_payload(self, response, shape, member_shapes, final_parsed):
+        if 'payload' in shape.serialization:
+            # If a payload is specified in the output shape, then only that
+            # shape is used for the body payload.
+            payload_member_name = shape.serialization['payload']
+            body_shape = member_shapes[payload_member_name]
+            if body_shape.serialization.get('eventstream'):
+                body = self._create_event_stream(response, body_shape)
+                final_parsed[payload_member_name] = body
+            elif body_shape.type_name in ['string', 'blob']:
+                # This is a stream
+                body = response['body']
+                if isinstance(body, bytes):
+                    body = body.decode(self.DEFAULT_ENCODING)
+                final_parsed[payload_member_name] = body
+            else:
+                original_parsed = self._initial_body_parse(response['body'])
+                final_parsed[payload_member_name] = self._parse_shape(
+                    body_shape, original_parsed
+                )
+        else:
+            original_parsed = self._initial_body_parse(response['body'])
+            body_parsed = self._parse_shape(shape, original_parsed)
+            final_parsed.update(body_parsed)
+
+    def _parse_non_payload_attrs(
+        self, response, shape, member_shapes, final_parsed
+    ):
+        headers = response['headers']
+        for name in member_shapes:
+            member_shape = member_shapes[name]
+            location = member_shape.serialization.get('location')
+            if location is None:
+                continue
+            elif location == 'statusCode':
+                final_parsed[name] = self._parse_shape(
+                    member_shape, response['status_code']
+                )
+            elif location == 'headers':
+                final_parsed[name] = self._parse_header_map(
+                    member_shape, headers
+                )
+            elif location == 'header':
+                header_name = member_shape.serialization.get('name', name)
+                if header_name in headers:
+                    final_parsed[name] = self._parse_shape(
+                        member_shape, headers[header_name]
+                    )
+
+    def _parse_header_map(self, shape, headers):
+        # Note that headers are case insensitive, so we .lower() all header
+        # names and header prefixes.
+        parsed = {}
+        prefix = shape.serialization.get('name', '').lower()
+        for header_name in headers:
+            if header_name.lower().startswith(prefix):
+                # The key name inserted into the parsed hash
+                # strips off the prefix.
+                name = header_name[len(prefix) :]
+                parsed[name] = headers[header_name]
+        return parsed
+
+    def _initial_body_parse(self, body_contents):
+        # This method should do the initial xml/json parsing of the
+        # body. We still need to walk the parsed body in order
+        # to convert types, but this method will do the first round
+        # of parsing.
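+        # (Illustrative sketch, not part of the original source: for
+        # rest-json this first phase is roughly
+        #     json.loads(body) -> {'Timestamp': 1700000000, 'Data': 'aGk='}
+        # and _parse_shape then converts those primitives into a datetime
+        # and a decoded blob according to the output shape.)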
+        raise NotImplementedError("_initial_body_parse")
+
+    def _handle_string(self, shape, value):
+        parsed = value
+        if is_json_value_header(shape):
+            decoded = base64.b64decode(value).decode(self.DEFAULT_ENCODING)
+            parsed = json.loads(decoded)
+        return parsed
+
+    def _handle_list(self, shape, node):
+        location = shape.serialization.get('location')
+        if location == 'header' and not isinstance(node, list):
+            # A list in headers may be a comma-separated string as per RFC 7230
+            node = [e.strip() for e in node.split(',')]
+        return super()._handle_list(shape, node)
+
+
+class BaseRpcV2Parser(ResponseParser):
+    def _do_parse(self, response, shape):
+        parsed = {}
+        if shape is not None:
+            event_stream_name = shape.event_stream_name
+            if event_stream_name:
+                parsed = self._handle_event_stream(
+                    response, shape, event_stream_name
+                )
+            else:
+                parsed = {}
+                self._parse_payload(response, shape, parsed)
+            parsed['ResponseMetadata'] = self._populate_response_metadata(
+                response
+            )
+        return parsed
+
+    def _add_modeled_parse(self, response, shape, final_parsed):
+        if shape is None:
+            return final_parsed
+        self._parse_payload(response, shape, final_parsed)
+
+    def _do_modeled_error_parse(self, response, shape):
+        final_parsed = {}
+        self._add_modeled_parse(response, shape, final_parsed)
+        return final_parsed
+
+    def _populate_response_metadata(self, response):
+        metadata = {}
+        headers = response['headers']
+        if 'x-amzn-requestid' in headers:
+            metadata['RequestId'] = headers['x-amzn-requestid']
+        return metadata
+
+    def _handle_structure(self, shape, node):
+        parsed = {}
+        members = shape.members
+        if shape.is_tagged_union:
+            cleaned_value = node.copy()
+            cleaned_value.pop("__type", None)
+            cleaned_value = {
+                k: v for k, v in cleaned_value.items() if v is not None
+            }
+            if len(cleaned_value) != 1:
+                error_msg = (
+                    "Invalid service response: %s must have one and only "
+                    "one member set."
+                )
+                raise ResponseParserError(error_msg % shape.name)
+        for member_name in members:
+            member_shape = members[member_name]
+            member_node = node.get(member_name)
+            if member_node is not None:
+                parsed[member_name] = self._parse_shape(
+                    member_shape, member_node
+                )
+        return parsed
+
+    def _parse_payload(self, response, shape, final_parsed):
+        original_parsed = self._initial_body_parse(response['body'])
+        body_parsed = self._parse_shape(shape, original_parsed)
+        final_parsed.update(body_parsed)
+
+    def _initial_body_parse(self, body_contents):
+        # This method should do the initial parsing of the
+        # body. We still need to walk the parsed body in order
+        # to convert types, but this method will do the first round
+        # of parsing.
+        raise NotImplementedError("_initial_body_parse")
+
+
+class RestJSONParser(BaseRestParser, BaseJSONParser):
+    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
+
+    def _initial_body_parse(self, body_contents):
+        return self._parse_body_as_json(body_contents)
+
+    def _do_error_parse(self, response, shape):
+        error = super()._do_error_parse(response, shape)
+        self._inject_error_code(error, response)
+        return error
+
+    def _inject_error_code(self, error, response):
+        # The "Code" value can come from either a response
+        # header or a value in the JSON body.
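+        # (Illustrative examples, not part of the original source; either of
+        # these carries the code 'ResourceNotFoundException':
+        #     header: x-amzn-errortype: ResourceNotFoundException:http://...
+        #     body:   {"code": "com.example.v1#ResourceNotFoundException"}
+        # The split/rsplit below trims both down to the bare name.)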
+        body = self._initial_body_parse(response['body'])
+        code = None
+        if 'x-amzn-errortype' in response['headers']:
+            code = response['headers']['x-amzn-errortype']
+        elif 'code' in body or 'Code' in body:
+            code = body.get('code', body.get('Code', ''))
+        if code is None:
+            return
+        if isinstance(code, str):
+            code = code.split(':', 1)[0].rsplit('#', 1)[-1]
+        error['Error']['Code'] = code
+
+    def _handle_boolean(self, shape, value):
+        return ensure_boolean(value)
+
+    def _handle_integer(self, shape, value):
+        return int(value)
+
+    def _handle_float(self, shape, value):
+        return float(value)
+
+    _handle_long = _handle_integer
+    _handle_double = _handle_float
+
+
+class RpcV2CBORParser(BaseRpcV2Parser, BaseCBORParser):
+    EVENT_STREAM_PARSER_CLS = EventStreamCBORParser
+
+    def _initial_body_parse(self, body_contents):
+        if body_contents == b'':
+            return body_contents
+        body_contents_stream = self.get_peekable_stream_from_bytes(
+            body_contents
+        )
+        return self.parse_data_item(body_contents_stream)
+
+    def _do_error_parse(self, response, shape):
+        body = self._initial_body_parse(response['body'])
+        error = {
+            "Error": {
+                "Message": body.get('message', body.get('Message', '')),
+                "Code": '',
+            },
+            "ResponseMetadata": {},
+        }
+        headers = response['headers']
+
+        code = body.get('__type')
+        if code is None:
+            response_code = response.get('status_code')
+            if response_code is not None:
+                code = str(response_code)
+        if code is not None:
+            if ':' in code:
+                code = code.split(':', 1)[0]
+            if '#' in code:
+                code = code.rsplit('#', 1)[1]
+            if 'x-amzn-query-error' in headers:
+                code = self._do_query_compatible_error_parse(
+                    code, headers, error
+                )
+            error['Error']['Code'] = code
+        if 'x-amzn-requestid' in headers:
+            error.setdefault('ResponseMetadata', {})['RequestId'] = headers[
+                'x-amzn-requestid'
+            ]
+        return error
+
+    def _handle_event_stream(self, response, shape, event_name):
+        event_stream_shape = shape.members[event_name]
+        event_stream = self._create_event_stream(response, event_stream_shape)
+        try:
+            event = event_stream.get_initial_response()
+        except NoInitialResponseError:
+            error_msg = 'First event was not of type initial-response'
+            raise ResponseParserError(error_msg)
+        parsed = self._initial_body_parse(event.payload)
+        parsed[event_name] = event_stream
+        return parsed
+
+
+class RestXMLParser(BaseRestParser, BaseXMLResponseParser):
+    EVENT_STREAM_PARSER_CLS = EventStreamXMLParser
+
+    def _initial_body_parse(self, xml_string):
+        if not xml_string:
+            return ETree.Element('')
+        return self._parse_xml_string_to_dom(xml_string)
+
+    def _do_error_parse(self, response, shape):
+        # We're trying to be service agnostic here, but S3 does have a
+        # slightly different response structure for its errors compared to
+        # other rest-xml services (route53/cloudfront). We handle this by
+        # just trying to parse both forms.
+        # First:
+        # <ErrorResponse xmlns="...">
+        #     <Error>
+        #         <Type>Sender</Type>
+        #         <Code>InvalidInput</Code>
+        #         <Message>Invalid resource type: foo</Message>
+        #     </Error>
+        #     <RequestId>request-id</RequestId>
+        # </ErrorResponse>
+        if response['body']:
+            # If the body ends up being invalid xml, the xml parser should
+            # not blow up. It should at least try to pull information about
+            # the error response from other sources like the HTTP status code.
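+            # (Illustrative fallback, not part of the original source: an
+            # empty or non-XML 503 body would be parsed by
+            # _parse_error_from_http_status below into roughly
+            #     {'Error': {'Code': '503', 'Message': 'Service Unavailable'},
+            #      'ResponseMetadata': {...}})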
+            try:
+                return self._parse_error_from_body(response)
+            except ResponseParserError:
+                LOG.debug(
+                    'Exception caught when parsing error response body:',
+                    exc_info=True,
+                )
+        return self._parse_error_from_http_status(response)
+
+    def _parse_error_from_http_status(self, response):
+        return {
+            'Error': {
+                'Code': str(response['status_code']),
+                'Message': http.client.responses.get(
+                    response['status_code'], ''
+                ),
+            },
+            'ResponseMetadata': {
+                'RequestId': response['headers'].get('x-amz-request-id', ''),
+                'HostId': response['headers'].get('x-amz-id-2', ''),
+            },
+        }
+
+    def _parse_error_from_body(self, response):
+        xml_contents = response['body']
+        root = self._parse_xml_string_to_dom(xml_contents)
+        parsed = self._build_name_to_xml_node(root)
+        self._replace_nodes(parsed)
+        if root.tag == 'Error':
+            # This is an S3 error response. First we'll populate the
+            # response metadata.
+            metadata = self._populate_response_metadata(response)
+            # The RequestId and the HostId are already in the
+            # ResponseMetadata, but are also duplicated in the XML
+            # body. We don't need these values in both places,
+            # we'll just remove them from the parsed XML body.
+            parsed.pop('RequestId', '')
+            parsed.pop('HostId', '')
+            return {'Error': parsed, 'ResponseMetadata': metadata}
+        elif 'RequestId' in parsed:
+            # Other rest-xml services:
+            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
+        default = {'Error': {'Message': '', 'Code': ''}}
+        merge_dicts(default, parsed)
+        return default
+
+    @_text_content
+    def _handle_string(self, shape, text):
+        text = super()._handle_string(shape, text)
+        return text
+
+
+PROTOCOL_PARSERS = {
+    'ec2': EC2QueryParser,
+    'query': QueryParser,
+    'json': JSONParser,
+    'rest-json': RestJSONParser,
+    'rest-xml': RestXMLParser,
+    'smithy-rpc-v2-cbor': RpcV2CBORParser,
+}
diff --git a/py311/lib/python3.11/site-packages/botocore/plugin.py b/py311/lib/python3.11/site-packages/botocore/plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..a06a75505abac1c96f097f95bdcaaa487ff9ed02
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/plugin.py
@@ -0,0 +1,85 @@
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+"""
+NOTE: This module is considered private and is subject to abrupt breaking
+changes without prior announcement. Please do not use it directly.
+"""
+
+import importlib
+import logging
+import os
+from contextvars import ContextVar
+from dataclasses import dataclass
+from typing import Optional
+
+log = logging.getLogger(__name__)
+
+
+@dataclass
+class PluginContext:
+    """
+    Encapsulation of plugins tracked within the `_plugin_context` context
+    variable.
+    """
+
+    plugins: Optional[str] = None
+
+
+_plugin_context = ContextVar("_plugin_context")
+
+
+def get_plugin_context():
+    """Get the current `_plugin_context` context variable if set, else None."""
+    return _plugin_context.get(None)
+
+
+def set_plugin_context(ctx):
+    """Set the current `_plugin_context` context variable."""
+    token = _plugin_context.set(ctx)
+    return token
+
+
+def reset_plugin_context(token):
+    """Reset the current `_plugin_context` context variable."""
+    _plugin_context.reset(token)
+
+
+def get_botocore_plugins():
+    context = get_plugin_context()
+    if context is not None:
+        plugins = context.plugins
+        if plugins is None:
+            context.plugins = os.environ.get('BOTOCORE_EXPERIMENTAL__PLUGINS')
+        else:
+            return plugins
+    return os.environ.get('BOTOCORE_EXPERIMENTAL__PLUGINS')
+
+
+def load_client_plugins(client, plugins):
+    for plugin_name, module_name in plugins.items():
+        log.debug(
+            "Importing client plugin %s from module %s",
+            plugin_name,
+            module_name,
+        )
+        try:
+            module = importlib.import_module(module_name)
+            module.initialize_client_plugin(client)
+        except ModuleNotFoundError:
+            log.debug(
+                "Failed to locate the following plugin module: %s.",
+                plugin_name,
+            )
+        except Exception as e:
+            log.debug(
+                "Error raised during the loading of %s: %s", plugin_name, e
+            )
diff --git a/py311/lib/python3.11/site-packages/botocore/regions.py b/py311/lib/python3.11/site-packages/botocore/regions.py
new file mode 100644
index 0000000000000000000000000000000000000000..db7ece750991d5f09e116477d7b76dc3635a0919
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/regions.py
@@ -0,0 +1,869 @@
+# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+"""Resolves regions and endpoints.
+
+This module implements endpoint resolution, including resolving endpoints for a
+given service and region and resolving the available endpoints for a service
+in a specific AWS partition.
+"""
+
+import copy
+import logging
+import re
+from enum import Enum
+
+import jmespath
+
+from botocore import UNSIGNED, xform_name
+from botocore.auth import AUTH_TYPE_MAPS, HAS_CRT
+from botocore.crt import CRT_SUPPORTED_AUTH_TYPES
+from botocore.endpoint_provider import EndpointProvider
+from botocore.exceptions import (
+    EndpointProviderError,
+    EndpointVariantError,
+    InvalidEndpointConfigurationError,
+    InvalidHostLabelError,
+    MissingDependencyException,
+    NoRegionError,
+    ParamValidationError,
+    UnknownEndpointResolutionBuiltInName,
+    UnknownRegionError,
+    UnknownSignatureVersionError,
+    UnsupportedS3AccesspointConfigurationError,
+    UnsupportedS3ConfigurationError,
+    UnsupportedS3ControlArnError,
+    UnsupportedS3ControlConfigurationError,
+)
+from botocore.useragent import register_feature_id
+from botocore.utils import ensure_boolean, instance_cache
+
+LOG = logging.getLogger(__name__)
+DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}'  # noqa
+DEFAULT_SERVICE_DATA = {'endpoints': {}}
+
+
+class BaseEndpointResolver:
+    """Resolves regions and endpoints. Must be subclassed."""
+
+    def construct_endpoint(self, service_name, region_name=None):
+        """Resolves an endpoint for a service and region combination.
+
+        :type service_name: string
+        :param service_name: Name of the service to resolve an endpoint for
+            (e.g., s3)
+
+        :type region_name: string
+        :param region_name: Region/endpoint name to resolve (e.g., us-east-1)
+            if no region is provided, the first found partition-wide endpoint
+            will be used if available.
+
+        :rtype: dict
+        :return: Returns a dict containing the following keys:
+            - partition: (string, required) Resolved partition name
+            - endpointName: (string, required) Resolved endpoint name
+            - hostname: (string, required) Hostname to use for this endpoint
+            - sslCommonName: (string) sslCommonName to use for this endpoint.
+            - credentialScope: (dict) Signature version 4 credential scope
+              - region: (string) region name override when signing.
+              - service: (string) service name override when signing.
+            - signatureVersions: (list) A list of possible signature
+              versions, including s3, v4, v2, and s3v4
+            - protocols: (list) A list of supported protocols
+              (e.g., http, https)
+            - ...: Other keys may be included as well based on the metadata
+        """
+        raise NotImplementedError
+
+    def get_available_partitions(self):
+        """Lists the partitions available to the endpoint resolver.
+
+        :return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
+        """
+        raise NotImplementedError
+
+    def get_available_endpoints(
+        self, service_name, partition_name='aws', allow_non_regional=False
+    ):
+        """Lists the endpoint names of a particular partition.
+
+        :type service_name: string
+        :param service_name: Name of a service to list endpoints for
+            (e.g., s3)
+
+        :type partition_name: string
+        :param partition_name: Name of the partition to limit endpoints to
+            (e.g., aws for the public AWS endpoints, aws-cn for AWS China
+            endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.)
+
+        :type allow_non_regional: bool
+        :param allow_non_regional: Set to True to include endpoints that are
+            not regional endpoints (e.g., s3-external-1,
+            fips-us-gov-west-1, etc.).
+        :return: Returns a list of endpoint names (e.g., ["us-east-1"]).
+        """
+        raise NotImplementedError
+
+
+class EndpointResolver(BaseEndpointResolver):
+    """Resolves endpoints based on partition endpoint metadata"""
+
+    _UNSUPPORTED_DUALSTACK_PARTITIONS = ['aws-iso', 'aws-iso-b']
+
+    def __init__(self, endpoint_data, uses_builtin_data=False):
+        """
+        :type endpoint_data: dict
+        :param endpoint_data: A dict of partition data.
+
+        :type uses_builtin_data: boolean
+        :param uses_builtin_data: Whether the endpoint data originates in the
+            package's data directory.
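+
+        Example (illustrative only; real endpoint data is far larger and
+        comes from botocore's bundled endpoint metadata)::
+
+            resolver = EndpointResolver({'partitions': []})
+            resolver.get_available_partitions()  # -> []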
+        """
+        if 'partitions' not in endpoint_data:
+            raise ValueError('Missing "partitions" in endpoint data')
+        self._endpoint_data = endpoint_data
+        self.uses_builtin_data = uses_builtin_data
+
+    def get_service_endpoints_data(self, service_name, partition_name='aws'):
+        for partition in self._endpoint_data['partitions']:
+            if partition['partition'] != partition_name:
+                continue
+            services = partition['services']
+            if service_name not in services:
+                continue
+            return services[service_name]['endpoints']
+
+    def get_available_partitions(self):
+        result = []
+        for partition in self._endpoint_data['partitions']:
+            result.append(partition['partition'])
+        return result
+
+    def get_available_endpoints(
+        self,
+        service_name,
+        partition_name='aws',
+        allow_non_regional=False,
+        endpoint_variant_tags=None,
+    ):
+        result = []
+        for partition in self._endpoint_data['partitions']:
+            if partition['partition'] != partition_name:
+                continue
+            services = partition['services']
+            if service_name not in services:
+                continue
+            service_endpoints = services[service_name]['endpoints']
+            for endpoint_name in service_endpoints:
+                is_regional_endpoint = endpoint_name in partition['regions']
+                # Only regional endpoints can be modeled with variants
+                if endpoint_variant_tags and is_regional_endpoint:
+                    variant_data = self._retrieve_variant_data(
+                        service_endpoints[endpoint_name], endpoint_variant_tags
+                    )
+                    if variant_data:
+                        result.append(endpoint_name)
+                elif allow_non_regional or is_regional_endpoint:
+                    result.append(endpoint_name)
+        return result
+
+    def get_partition_dns_suffix(
+        self, partition_name, endpoint_variant_tags=None
+    ):
+        for partition in self._endpoint_data['partitions']:
+            if partition['partition'] == partition_name:
+                if endpoint_variant_tags:
+                    variant = self._retrieve_variant_data(
+                        partition.get('defaults'), endpoint_variant_tags
+                    )
+                    if variant and 'dnsSuffix' in variant:
+                        return variant['dnsSuffix']
+                else:
+                    return partition['dnsSuffix']
+        return None
+
+    def construct_endpoint(
+        self,
+        service_name,
+        region_name=None,
+        partition_name=None,
+        use_dualstack_endpoint=False,
+        use_fips_endpoint=False,
+    ):
+        if (
+            service_name == 's3'
+            and use_dualstack_endpoint
+            and region_name is None
+        ):
+            region_name = 'us-east-1'
+
+        if partition_name is not None:
+            valid_partition = None
+            for partition in self._endpoint_data['partitions']:
+                if partition['partition'] == partition_name:
+                    valid_partition = partition
+
+            if valid_partition is not None:
+                result = self._endpoint_for_partition(
+                    valid_partition,
+                    service_name,
+                    region_name,
+                    use_dualstack_endpoint,
+                    use_fips_endpoint,
+                    True,
+                )
+                return result
+            return None
+
+        # Iterate over each partition until a match is found.
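+        # (Illustrative note, not part of the original source: a region that
+        # is not explicitly listed in a partition's 'regions' map, e.g. one
+        # newer than the bundled endpoint data, can still match through the
+        # partition's 'regionRegex' check in _region_match below.)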
+        for partition in self._endpoint_data['partitions']:
+            if use_dualstack_endpoint and (
+                partition['partition']
+                in self._UNSUPPORTED_DUALSTACK_PARTITIONS
+            ):
+                continue
+            result = self._endpoint_for_partition(
+                partition,
+                service_name,
+                region_name,
+                use_dualstack_endpoint,
+                use_fips_endpoint,
+            )
+            if result:
+                return result
+
+    def get_partition_for_region(self, region_name):
+        for partition in self._endpoint_data['partitions']:
+            if self._region_match(partition, region_name):
+                return partition['partition']
+        raise UnknownRegionError(
+            region_name=region_name,
+            error_msg='No partition found for provided region_name.',
+        )
+
+    def _endpoint_for_partition(
+        self,
+        partition,
+        service_name,
+        region_name,
+        use_dualstack_endpoint,
+        use_fips_endpoint,
+        force_partition=False,
+    ):
+        partition_name = partition["partition"]
+        if (
+            use_dualstack_endpoint
+            and partition_name in self._UNSUPPORTED_DUALSTACK_PARTITIONS
+        ):
+            error_msg = (
+                "Dualstack endpoints are currently not supported"
+                f" for {partition_name} partition"
+            )
+            raise EndpointVariantError(tags=['dualstack'], error_msg=error_msg)
+
+        # Get the service from the partition, or an empty template.
+        service_data = partition['services'].get(
+            service_name, DEFAULT_SERVICE_DATA
+        )
+        # Use the partition endpoint if no region is supplied.
+        if region_name is None:
+            if 'partitionEndpoint' in service_data:
+                region_name = service_data['partitionEndpoint']
+            else:
+                raise NoRegionError()
+
+        resolve_kwargs = {
+            'partition': partition,
+            'service_name': service_name,
+            'service_data': service_data,
+            'endpoint_name': region_name,
+            'use_dualstack_endpoint': use_dualstack_endpoint,
+            'use_fips_endpoint': use_fips_endpoint,
+        }
+
+        # Attempt to resolve the exact region for this partition.
+        if region_name in service_data['endpoints']:
+            return self._resolve(**resolve_kwargs)
+
+        # Check to see if the endpoint provided is valid for the partition.
+        if self._region_match(partition, region_name) or force_partition:
+            # Use the partition endpoint if set and not regionalized.
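+            # (Illustrative example, not part of the original source: a
+            # global service modeled with
+            #     "partitionEndpoint": "aws-global", "isRegionalized": false
+            # resolves every matching region to the single 'aws-global'
+            # endpoint here.)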
+            partition_endpoint = service_data.get('partitionEndpoint')
+            is_regionalized = service_data.get('isRegionalized', True)
+            if partition_endpoint and not is_regionalized:
+                LOG.debug(
+                    'Using partition endpoint for %s, %s: %s',
+                    service_name,
+                    region_name,
+                    partition_endpoint,
+                )
+                resolve_kwargs['endpoint_name'] = partition_endpoint
+                return self._resolve(**resolve_kwargs)
+            LOG.debug(
+                'Creating a regex based endpoint for %s, %s',
+                service_name,
+                region_name,
+            )
+            return self._resolve(**resolve_kwargs)
+
+    def _region_match(self, partition, region_name):
+        if region_name in partition['regions']:
+            return True
+        if 'regionRegex' in partition:
+            return re.compile(partition['regionRegex']).match(region_name)
+        return False
+
+    def _retrieve_variant_data(self, endpoint_data, tags):
+        variants = endpoint_data.get('variants', [])
+        for variant in variants:
+            if set(variant['tags']) == set(tags):
+                result = variant.copy()
+                return result
+
+    def _create_tag_list(self, use_dualstack_endpoint, use_fips_endpoint):
+        tags = []
+        if use_dualstack_endpoint:
+            tags.append('dualstack')
+        if use_fips_endpoint:
+            tags.append('fips')
+        return tags
+
+    def _resolve_variant(
+        self, tags, endpoint_data, service_defaults, partition_defaults
+    ):
+        result = {}
+        for variants in [endpoint_data, service_defaults, partition_defaults]:
+            variant = self._retrieve_variant_data(variants, tags)
+            if variant:
+                self._merge_keys(variant, result)
+        return result
+
+    def _resolve(
+        self,
+        partition,
+        service_name,
+        service_data,
+        endpoint_name,
+        use_dualstack_endpoint,
+        use_fips_endpoint,
+    ):
+        endpoint_data = service_data.get('endpoints', {}).get(
+            endpoint_name, {}
+        )
+
+        if endpoint_data.get('deprecated'):
+            LOG.warning(
+                'Client is configured with the deprecated endpoint: %s',
+                endpoint_name,
+            )
+
+        service_defaults = service_data.get('defaults', {})
+        partition_defaults = partition.get('defaults', {})
+        tags = self._create_tag_list(use_dualstack_endpoint, use_fips_endpoint)
+
+        if tags:
+            result = self._resolve_variant(
+                tags, endpoint_data, service_defaults, partition_defaults
+            )
+            if result == {}:
+                error_msg = (
+                    f"Endpoint does not exist for {service_name} "
+                    f"in region {endpoint_name}"
+                )
+                raise EndpointVariantError(tags=tags, error_msg=error_msg)
+            self._merge_keys(endpoint_data, result)
+        else:
+            result = endpoint_data
+
+        # If dnsSuffix has not already been consumed from a variant definition
+        if 'dnsSuffix' not in result:
+            result['dnsSuffix'] = partition['dnsSuffix']
+
+        result['partition'] = partition['partition']
+        result['endpointName'] = endpoint_name
+
+        # Merge in the service defaults then the partition defaults.
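+        # (Illustrative note, based on _merge_keys below: merging only fills
+        # in missing keys, so endpoint-level data takes precedence over
+        # service defaults, which take precedence over partition defaults,
+        # e.g. an endpoint-level 'signatureVersions' is never overwritten.)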
+        self._merge_keys(service_defaults, result)
+        self._merge_keys(partition_defaults, result)
+
+        result['hostname'] = self._expand_template(
+            partition,
+            result['hostname'],
+            service_name,
+            endpoint_name,
+            result['dnsSuffix'],
+        )
+        if 'sslCommonName' in result:
+            result['sslCommonName'] = self._expand_template(
+                partition,
+                result['sslCommonName'],
+                service_name,
+                endpoint_name,
+                result['dnsSuffix'],
+            )
+
+        return result
+
+    def _merge_keys(self, from_data, result):
+        for key in from_data:
+            if key not in result:
+                result[key] = from_data[key]
+
+    def _expand_template(
+        self, partition, template, service_name, endpoint_name, dnsSuffix
+    ):
+        return template.format(
+            service=service_name, region=endpoint_name, dnsSuffix=dnsSuffix
+        )
+
+
+class EndpointResolverBuiltins(str, Enum):
+    # The AWS Region configured for the SDK client (str)
+    AWS_REGION = "AWS::Region"
+    # Whether the UseFIPSEndpoint configuration option has been enabled for
+    # the SDK client (bool)
+    AWS_USE_FIPS = "AWS::UseFIPS"
+    # Whether the UseDualStackEndpoint configuration option has been enabled
+    # for the SDK client (bool)
+    AWS_USE_DUALSTACK = "AWS::UseDualStack"
+    # Whether the global endpoint should be used with STS, rather than the
+    # regional endpoint for us-east-1 (bool)
+    AWS_STS_USE_GLOBAL_ENDPOINT = "AWS::STS::UseGlobalEndpoint"
+    # Whether the global endpoint should be used with S3, rather than the
+    # regional endpoint for us-east-1 (bool)
+    AWS_S3_USE_GLOBAL_ENDPOINT = "AWS::S3::UseGlobalEndpoint"
+    # Whether S3 Transfer Acceleration has been requested (bool)
+    AWS_S3_ACCELERATE = "AWS::S3::Accelerate"
+    # Whether S3 Force Path Style has been enabled (bool)
+    AWS_S3_FORCE_PATH_STYLE = "AWS::S3::ForcePathStyle"
+    # Whether to use the ARN region or raise an error when ARN and client
+    # region differ (for s3 service only, bool)
+    AWS_S3_USE_ARN_REGION = "AWS::S3::UseArnRegion"
+    # Whether to use the ARN region or raise an error when ARN and client
+    # region differ (for s3-control service only, bool)
+    AWS_S3CONTROL_USE_ARN_REGION = 'AWS::S3Control::UseArnRegion'
+    # Whether multi-region access points (MRAP) should be disabled (bool)
+    AWS_S3_DISABLE_MRAP = "AWS::S3::DisableMultiRegionAccessPoints"
+    # Whether a custom endpoint has been configured (str)
+    SDK_ENDPOINT = "SDK::Endpoint"
+    # An AWS account ID that can be optionally configured for the SDK
+    # client (str)
+    ACCOUNT_ID = "AWS::Auth::AccountId"
+    # Whether an endpoint should include an account ID (str)
+    ACCOUNT_ID_ENDPOINT_MODE = "AWS::Auth::AccountIdEndpointMode"
+
+
+class EndpointRulesetResolver:
+    """Resolves endpoints using a service's endpoint ruleset"""
+
+    def __init__(
+        self,
+        endpoint_ruleset_data,
+        partition_data,
+        service_model,
+        builtins,
+        client_context,
+        event_emitter,
+        use_ssl=True,
+        requested_auth_scheme=None,
+    ):
+        self._provider = EndpointProvider(
+            ruleset_data=endpoint_ruleset_data,
+            partition_data=partition_data,
+        )
+        self._param_definitions = self._provider.ruleset.parameters
+        self._service_model = service_model
+        self._builtins = builtins
+        self._client_context = client_context
+        self._event_emitter = event_emitter
+        self._use_ssl = use_ssl
+        self._requested_auth_scheme = requested_auth_scheme
+        self._instance_cache = {}
+
+    def construct_endpoint(
+        self,
+        operation_model,
+        call_args,
+        request_context,
+    ):
+        """Invokes the provider with params defined in the service's ruleset"""
+        if call_args is None:
+            call_args = {}
+
+        if request_context is None:
+            request_context = {}
+
+        provider_params = self._get_provider_params(
+            operation_model, call_args, request_context
+        )
+        LOG.debug(
+            'Calling endpoint provider with parameters: %s', provider_params
+        )
+        try:
+            provider_result = self._provider.resolve_endpoint(
+                **provider_params
+            )
+        except EndpointProviderError as ex:
+            botocore_exception = self.ruleset_error_to_botocore_exception(
+                ex, provider_params
+            )
+            if botocore_exception is None:
+                raise
+            else:
+                raise botocore_exception from ex
+        LOG.debug('Endpoint provider result: %s', provider_result.url)
+
+        # The endpoint provider does not support non-secure transport.
+        if (
+            not self._use_ssl
+            and provider_result.url.startswith('https://')
+            and 'Endpoint' not in provider_params
+        ):
+            provider_result = provider_result._replace(
+                url=f'http://{provider_result.url[8:]}'
+            )
+
+        # Multi-valued headers are not supported in botocore. Replace the
+        # list of values returned for each header with just its first entry,
+        # dropping any additional entries.
+        provider_result = provider_result._replace(
+            headers={
+                key: val[0] for key, val in provider_result.headers.items()
+            }
+        )
+
+        return provider_result
+
+    def _get_provider_params(
+        self, operation_model, call_args, request_context
+    ):
+        """Resolve a value for each parameter defined in the service's ruleset
+
+        The resolution order for parameter values is:
+        1. Operation-specific static context values from the service definition
+        2. Operation-specific dynamic context values from API parameters
+        3. Client-specific context parameters
+        4. Built-in values such as region, FIPS usage, ...
+        """
+        provider_params = {}
+        # Builtin values can be customized for each operation by hooks
+        # subscribing to the ``before-endpoint-resolution.*`` event.
+        customized_builtins = self._get_customized_builtins(
+            operation_model, call_args, request_context
+        )
+        for param_name, param_def in self._param_definitions.items():
+            param_val = self._resolve_param_from_context(
+                param_name=param_name,
+                operation_model=operation_model,
+                call_args=call_args,
+            )
+            if param_val is None and param_def.builtin is not None:
+                param_val = self._resolve_param_as_builtin(
+                    builtin_name=param_def.builtin,
+                    builtins=customized_builtins,
+                )
+            if param_val is not None:
+                provider_params[param_name] = param_val
+                self._register_endpoint_feature_ids(param_name, param_val)
+
+        return provider_params
+
+    def _resolve_param_from_context(
+        self, param_name, operation_model, call_args
+    ):
+        static = self._resolve_param_as_static_context_param(
+            param_name, operation_model
+        )
+        if static is not None:
+            return static
+        dynamic = self._resolve_param_as_dynamic_context_param(
+            param_name, operation_model, call_args
+        )
+        if dynamic is not None:
+            return dynamic
+        operation_context_params = (
+            self._resolve_param_as_operation_context_param(
+                param_name, operation_model, call_args
+            )
+        )
+        if operation_context_params is not None:
+            return operation_context_params
+        return self._resolve_param_as_client_context_param(param_name)
+
+    def _resolve_param_as_static_context_param(
+        self, param_name, operation_model
+    ):
+        static_ctx_params = self._get_static_context_params(operation_model)
+        return static_ctx_params.get(param_name)
+
+    def _resolve_param_as_dynamic_context_param(
+        self, param_name, operation_model, call_args
+    ):
+        dynamic_ctx_params = self._get_dynamic_context_params(operation_model)
+        if param_name in dynamic_ctx_params:
+            member_name = dynamic_ctx_params[param_name]
+            return call_args.get(member_name)
+
+    def _resolve_param_as_client_context_param(self, param_name):
+        client_ctx_params = self._get_client_context_params()
+        if param_name in client_ctx_params:
+            client_ctx_varname = client_ctx_params[param_name]
+            return self._client_context.get(client_ctx_varname)
+
+    def _resolve_param_as_operation_context_param(
+        self, param_name, operation_model, call_args
+    ):
+        operation_ctx_params = operation_model.operation_context_parameters
+        if param_name in operation_ctx_params:
+            path = operation_ctx_params[param_name]['path']
+            return jmespath.search(path, call_args)
+
+    def _resolve_param_as_builtin(self, builtin_name, builtins):
+        if builtin_name not in EndpointResolverBuiltins.__members__.values():
+            raise UnknownEndpointResolutionBuiltInName(name=builtin_name)
+        builtin = builtins.get(builtin_name)
+        if callable(builtin):
+            return builtin()
+        return builtin
+
+    @instance_cache
+    def _get_static_context_params(self, operation_model):
+        """Mapping of param names to static param value for an operation"""
+        return {
+            param.name: param.value
+            for param in operation_model.static_context_parameters
+        }
+
+    @instance_cache
+    def _get_dynamic_context_params(self, operation_model):
+        """Mapping of param names to member names for an operation"""
+        return {
+            param.name: param.member_name
+            for param in operation_model.context_parameters
+        }
+
+    @instance_cache
+    def _get_client_context_params(self):
+        """Mapping of param names to client configuration variable"""
+        return {
+            param.name: xform_name(param.name)
+            for param in self._service_model.client_context_parameters
+        }
+
+    def _get_customized_builtins(
+        self, operation_model, call_args, request_context
+    ):
+        service_id = self._service_model.service_id.hyphenize()
+        customized_builtins = copy.copy(self._builtins)
+        # Handlers are expected to modify the builtins dict in place.
+        self._event_emitter.emit(
+            f'before-endpoint-resolution.{service_id}',
+            builtins=customized_builtins,
+            model=operation_model,
+            params=call_args,
+            context=request_context,
+        )
+        return customized_builtins
+
+    def auth_schemes_to_signing_ctx(self, auth_schemes):
+        """Convert an Endpoint's authSchemes property to a signing_context dict
+
+        :type auth_schemes: list
+        :param auth_schemes: A list of dictionaries taken from the
+            ``authSchemes`` property of an Endpoint object returned by
+            ``EndpointProvider``.
+
+        :rtype: str, dict
+        :return: Tuple of auth type string (to be used in
+            ``request_context['auth_type']``) and signing context dict (for
+            use in ``request_context['signing']``).
+        """
+        if not isinstance(auth_schemes, list) or len(auth_schemes) == 0:
+            raise TypeError("auth_schemes must be a non-empty list.")
+
+        LOG.debug(
+            'Selecting from endpoint provider\'s list of auth schemes: %s. '
+            'User selected auth scheme is: "%s"',
+            ', '.join([f'"{s.get("name")}"' for s in auth_schemes]),
+            self._requested_auth_scheme,
+        )
+
+        if self._requested_auth_scheme == UNSIGNED:
+            return 'none', {}
+
+        auth_schemes = [
+            {**scheme, 'name': self._strip_sig_prefix(scheme['name'])}
+            for scheme in auth_schemes
+        ]
+        if self._requested_auth_scheme is not None:
+            try:
+                # Use the first scheme that matches the requested scheme,
+                # after accounting for naming differences between botocore
+                # and endpoint rulesets. Keep the requested name.
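+                # (Illustrative match, not part of the original source: a
+                # requested scheme of 's3v4' pairs with a ruleset scheme
+                # named 'sigv4', and the returned auth type keeps the
+                # requested 's3v4' name.)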
+ name, scheme = next( + (self._requested_auth_scheme, s) + for s in auth_schemes + if self._does_botocore_authname_match_ruleset_authname( + self._requested_auth_scheme, s['name'] + ) + ) + except StopIteration: + # For legacy signers, no match will be found. Do not raise an + # exception, instead default to the logic in botocore + # customizations. + return None, {} + else: + try: + name, scheme = next( + (s['name'], s) + for s in auth_schemes + if s['name'] in AUTH_TYPE_MAPS + ) + except StopIteration: + # If no auth scheme was specifically requested and an + # authSchemes list is present in the Endpoint object but none + # of the entries are supported, raise an exception. + fixable_with_crt = False + auth_type_options = [s['name'] for s in auth_schemes] + if not HAS_CRT: + fixable_with_crt = any( + scheme in CRT_SUPPORTED_AUTH_TYPES + for scheme in auth_type_options + ) + + if fixable_with_crt: + raise MissingDependencyException( + msg='This operation requires an additional dependency.' + ' Use pip install botocore[crt] before proceeding.' + ) + else: + raise UnknownSignatureVersionError( + signature_version=', '.join(auth_type_options) + ) + + signing_context = {} + if 'signingRegion' in scheme: + signing_context['region'] = scheme['signingRegion'] + elif 'signingRegionSet' in scheme: + if len(scheme['signingRegionSet']) > 0: + signing_context['region'] = ','.join( + scheme['signingRegionSet'] + ) + if 'signingName' in scheme: + signing_context.update(signing_name=scheme['signingName']) + if 'disableDoubleEncoding' in scheme: + signing_context['disableDoubleEncoding'] = ensure_boolean( + scheme['disableDoubleEncoding'] + ) + + LOG.debug( + 'Selected auth type "%s" as "%s" with signing context params: %s', + scheme['name'], # original name without "sig" + name, # chosen name can differ when `signature_version` is set + signing_context, + ) + return name, signing_context + + def _strip_sig_prefix(self, auth_name): + """Normalize auth type names by removing any "sig" prefix""" + return auth_name[3:] if auth_name.startswith('sig') else auth_name + + def _does_botocore_authname_match_ruleset_authname(self, botoname, rsname): + """ + Whether a valid string provided as signature_version parameter for + client construction refers to the same auth methods as a string + returned by the endpoint ruleset provider. This accounts for: + + * The ruleset prefixes auth names with "sig" + * The s3 and s3control rulesets don't distinguish between v4[a] and + s3v4[a] signers + * The v2, v3, and HMAC v1 based signers (s3, s3-*) are botocore legacy + features and do not exist in the rulesets + * Only characters up to the first dash are considered + + Example matches: + * v4, sigv4 + * v4, v4 + * s3v4, sigv4 + * s3v7, sigv7 (hypothetical example) + * s3v4a, sigv4a + * s3v4-query, sigv4 + + Example mismatches: + * v4a, sigv4 + * s3, sigv4 + * s3-presign-post, sigv4 + """ + rsname = self._strip_sig_prefix(rsname) + botoname = botoname.split('-')[0] + if botoname != 's3' and botoname.startswith('s3'): + botoname = botoname[2:] + return rsname == botoname + + def ruleset_error_to_botocore_exception(self, ruleset_exception, params): + """Attempts to translate ruleset errors to pre-existing botocore + exception types by string matching exception strings. 
+ """ + msg = ruleset_exception.kwargs.get('msg') + if msg is None: + return + + if msg.startswith('Invalid region in ARN: '): + # Example message: + # "Invalid region in ARN: `us-we$t-2` (invalid DNS name)" + try: + label = msg.split('`')[1] + except IndexError: + label = msg + return InvalidHostLabelError(label=label) + + service_name = self._service_model.service_name + if service_name == 's3': + if ( + msg == 'S3 Object Lambda does not support S3 Accelerate' + or msg == 'Accelerate cannot be used with FIPS' + ): + return UnsupportedS3ConfigurationError(msg=msg) + if ( + msg.startswith('S3 Outposts does not support') + or msg.startswith('S3 MRAP does not support') + or msg.startswith('S3 Object Lambda does not support') + or msg.startswith('Access Points do not support') + or msg.startswith('Invalid configuration:') + or msg.startswith('Client was configured for partition') + ): + return UnsupportedS3AccesspointConfigurationError(msg=msg) + if msg.lower().startswith('invalid arn:'): + return ParamValidationError(report=msg) + if service_name == 's3control': + if msg.startswith('Invalid ARN:'): + arn = params.get('Bucket') + return UnsupportedS3ControlArnError(arn=arn, msg=msg) + if msg.startswith('Invalid configuration:') or msg.startswith( + 'Client was configured for partition' + ): + return UnsupportedS3ControlConfigurationError(msg=msg) + if msg == "AccountId is required but not set": + return ParamValidationError(report=msg) + if service_name == 'events': + if msg.startswith( + 'Invalid Configuration: FIPS is not supported with ' + 'EventBridge multi-region endpoints.' + ): + return InvalidEndpointConfigurationError(msg=msg) + if msg == 'EndpointId must be a valid host label.': + return InvalidEndpointConfigurationError(msg=msg) + return None + + def _register_endpoint_feature_ids(self, param_name, param_val): + if param_name == 'AccountIdEndpointMode': + register_feature_id(f'ACCOUNT_ID_MODE_{param_val.upper()}') + elif param_name == 'AccountId': + register_feature_id('RESOLVED_ACCOUNT_ID') diff --git a/py311/lib/python3.11/site-packages/botocore/response.py b/py311/lib/python3.11/site-packages/botocore/response.py new file mode 100644 index 0000000000000000000000000000000000000000..e123b547168c361fefd503ecac3b4d8b5475674b --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/response.py @@ -0,0 +1,216 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +import logging +from io import IOBase + +from urllib3.exceptions import ProtocolError as URLLib3ProtocolError +from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError + +from botocore import ( + ScalarTypes, # noqa: F401 + parsers, +) +from botocore.compat import ( + XMLParseError, # noqa: F401 + set_socket_timeout, +) +from botocore.exceptions import ( + IncompleteReadError, + ReadTimeoutError, + ResponseStreamingError, +) +from botocore.hooks import first_non_none_response # noqa + +logger = logging.getLogger(__name__) + + +class StreamingBody(IOBase): + """Wrapper class for an http response body. + + This provides a few additional conveniences that do not exist + in the urllib3 model: + + * Set the timeout on the socket (i.e read() timeouts) + * Auto validation of content length, if the amount of bytes + we read does not match the content length, an exception + is raised. + + """ + + _DEFAULT_CHUNK_SIZE = 1024 + + def __init__(self, raw_stream, content_length): + self._raw_stream = raw_stream + self._content_length = content_length + self._amount_read = 0 + + def __del__(self): + # Extending destructor in order to preserve the underlying raw_stream. + # The ability to add custom cleanup logic introduced in Python3.4+. + # https://www.python.org/dev/peps/pep-0442/ + pass + + def set_socket_timeout(self, timeout): + """Set the timeout seconds on the socket.""" + # The problem we're trying to solve is to prevent .read() calls from + # hanging. This can happen in rare cases. What we'd like to ideally + # do is set a timeout on the .read() call so that callers can retry + # the request. + # Unfortunately, this isn't currently possible in requests. + # See: https://github.com/kennethreitz/requests/issues/1803 + # So what we're going to do is reach into the guts of the stream and + # grab the socket object, which we can set the timeout on. We're + # putting in a check here so in case this interface goes away, we'll + # know. + try: + set_socket_timeout(self._raw_stream, timeout) + except AttributeError: + logger.exception( + "Cannot access the socket object of a streaming response. " + "It's possible the interface has changed." + ) + raise + + def readable(self): + try: + return self._raw_stream.readable() + except AttributeError: + return False + + def read(self, amt=None): + """Read at most amt bytes from the stream. + + If the amt argument is omitted, read all data. + """ + try: + chunk = self._raw_stream.read(amt) + except URLLib3ReadTimeoutError as e: + # TODO: the url will be None as urllib3 isn't setting it yet + raise ReadTimeoutError(endpoint_url=e.url, error=e) + except URLLib3ProtocolError as e: + raise ResponseStreamingError(error=e) + self._amount_read += len(chunk) + if amt is None or (not chunk and amt > 0): + # If the server sends empty contents or + # we ask to read all of the contents, then we know + # we need to verify the content length. 
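+            # (That is: ``amt`` is None, or ``amt > 0`` and we received
+            # an empty chunk, signalling the end of the stream.)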
+ self._verify_content_length() + return chunk + + def readinto(self, b): + """Read bytes into a pre-allocated, writable bytes-like object b, and return the number of bytes read.""" + try: + amount_read = self._raw_stream.readinto(b) + except URLLib3ReadTimeoutError as e: + # TODO: the url will be None as urllib3 isn't setting it yet + raise ReadTimeoutError(endpoint_url=e.url, error=e) + except URLLib3ProtocolError as e: + raise ResponseStreamingError(error=e) + self._amount_read += amount_read + if amount_read == 0 and len(b) > 0: + # If the server sends empty contents then we know we need to verify + # the content length. + self._verify_content_length() + return amount_read + + def readlines(self): + return self._raw_stream.readlines() + + def __iter__(self): + """Return an iterator to yield 1k chunks from the raw stream.""" + return self.iter_chunks(self._DEFAULT_CHUNK_SIZE) + + def __next__(self): + """Return the next 1k chunk from the raw stream.""" + current_chunk = self.read(self._DEFAULT_CHUNK_SIZE) + if current_chunk: + return current_chunk + raise StopIteration() + + def __enter__(self): + return self._raw_stream + + def __exit__(self, type, value, traceback): + self._raw_stream.close() + + next = __next__ + + def iter_lines(self, chunk_size=_DEFAULT_CHUNK_SIZE, keepends=False): + """Return an iterator to yield lines from the raw stream. + + This is achieved by reading chunk of bytes (of size chunk_size) at a + time from the raw stream, and then yielding lines from there. + """ + pending = b'' + for chunk in self.iter_chunks(chunk_size): + lines = (pending + chunk).splitlines(True) + for line in lines[:-1]: + yield line.splitlines(keepends)[0] + pending = lines[-1] + if pending: + yield pending.splitlines(keepends)[0] + + def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE): + """Return an iterator to yield chunks of chunk_size bytes from the raw + stream. + """ + while True: + current_chunk = self.read(chunk_size) + if current_chunk == b"": + break + yield current_chunk + + def _verify_content_length(self): + # See: https://github.com/kennethreitz/requests/issues/1855 + # Basically, our http library doesn't do this for us, so we have + # to do this ourself. + if self._content_length is not None and self._amount_read != int( + self._content_length + ): + raise IncompleteReadError( + actual_bytes=self._amount_read, + expected_bytes=int(self._content_length), + ) + + def tell(self): + return self._raw_stream.tell() + + def close(self): + """Close the underlying http response stream.""" + self._raw_stream.close() + + +def get_response(operation_model, http_response): + protocol = operation_model.service_model.resolved_protocol + response_dict = { + 'headers': http_response.headers, + 'status_code': http_response.status_code, + } + # TODO: Unfortunately, we have to have error logic here. + # If it looks like an error, in the streaming response case we + # need to actually grab the contents. 
+ if response_dict['status_code'] >= 300: + response_dict['body'] = http_response.content + elif operation_model.has_streaming_output: + response_dict['body'] = StreamingBody( + http_response.raw, response_dict['headers'].get('content-length') + ) + else: + response_dict['body'] = http_response.content + + parser = parsers.create_parser(protocol) + return http_response, parser.parse( + response_dict, operation_model.output_shape + ) diff --git a/py311/lib/python3.11/site-packages/botocore/retries/__init__.py b/py311/lib/python3.11/site-packages/botocore/retries/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6d6b377dfcdf246972c05659673308cfa40db37 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/retries/__init__.py @@ -0,0 +1,6 @@ +"""New retry v2 handlers. + +This package obsoletes the botocore/retryhandler.py module and contains +new retry logic. + +""" diff --git a/py311/lib/python3.11/site-packages/botocore/retries/adaptive.py b/py311/lib/python3.11/site-packages/botocore/retries/adaptive.py new file mode 100644 index 0000000000000000000000000000000000000000..5e638ddb7b8cc46b2fd12eb37bc9fbc68a744da7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/retries/adaptive.py @@ -0,0 +1,132 @@ +import logging +import math +import threading + +from botocore.retries import bucket, standard, throttling + +logger = logging.getLogger(__name__) + + +def register_retry_handler(client): + clock = bucket.Clock() + rate_adjustor = throttling.CubicCalculator( + starting_max_rate=0, start_time=clock.current_time() + ) + token_bucket = bucket.TokenBucket(max_rate=1, clock=clock) + rate_clocker = RateClocker(clock) + throttling_detector = standard.ThrottlingErrorDetector( + retry_event_adapter=standard.RetryEventAdapter(), + ) + limiter = ClientRateLimiter( + rate_adjustor=rate_adjustor, + rate_clocker=rate_clocker, + token_bucket=token_bucket, + throttling_detector=throttling_detector, + clock=clock, + ) + client.meta.events.register( + 'before-send', + limiter.on_sending_request, + ) + client.meta.events.register( + 'needs-retry', + limiter.on_receiving_response, + ) + return limiter + + +class ClientRateLimiter: + _MAX_RATE_ADJUST_SCALE = 2.0 + + def __init__( + self, + rate_adjustor, + rate_clocker, + token_bucket, + throttling_detector, + clock, + ): + self._rate_adjustor = rate_adjustor + self._rate_clocker = rate_clocker + self._token_bucket = token_bucket + self._throttling_detector = throttling_detector + self._clock = clock + self._enabled = False + self._lock = threading.Lock() + + def on_sending_request(self, request, **kwargs): + if self._enabled: + self._token_bucket.acquire() + + # Hooked up to needs-retry. 
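+    # The **kwargs received here are the raw needs-retry event kwargs;
+    # they are passed through unchanged to the throttling detector,
+    # which rebuilds a RetryContext from them.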
+ def on_receiving_response(self, **kwargs): + measured_rate = self._rate_clocker.record() + timestamp = self._clock.current_time() + with self._lock: + if not self._throttling_detector.is_throttling_error(**kwargs): + new_rate = self._rate_adjustor.success_received(timestamp) + else: + if not self._enabled: + rate_to_use = measured_rate + else: + rate_to_use = min( + measured_rate, self._token_bucket.max_rate + ) + new_rate = self._rate_adjustor.error_received( + rate_to_use, timestamp + ) + logger.debug( + "Throttling response received, new send rate: %s " + "measured rate: %s, token bucket capacity " + "available: %s", + new_rate, + measured_rate, + self._token_bucket.available_capacity, + ) + self._enabled = True + self._token_bucket.max_rate = min( + new_rate, self._MAX_RATE_ADJUST_SCALE * measured_rate + ) + + +class RateClocker: + """Tracks the rate at which a client is sending a request.""" + + _DEFAULT_SMOOTHING = 0.8 + # Update the rate every _TIME_BUCKET_RANGE seconds. + _TIME_BUCKET_RANGE = 0.5 + + def __init__( + self, + clock, + smoothing=_DEFAULT_SMOOTHING, + time_bucket_range=_TIME_BUCKET_RANGE, + ): + self._clock = clock + self._measured_rate = 0 + self._smoothing = smoothing + self._last_bucket = math.floor(self._clock.current_time()) + self._time_bucket_scale = 1 / self._TIME_BUCKET_RANGE + self._count = 0 + self._lock = threading.Lock() + + def record(self, amount=1): + with self._lock: + t = self._clock.current_time() + bucket = ( + math.floor(t * self._time_bucket_scale) + / self._time_bucket_scale + ) + self._count += amount + if bucket > self._last_bucket: + current_rate = self._count / float(bucket - self._last_bucket) + self._measured_rate = (current_rate * self._smoothing) + ( + self._measured_rate * (1 - self._smoothing) + ) + self._count = 0 + self._last_bucket = bucket + return self._measured_rate + + @property + def measured_rate(self): + return self._measured_rate diff --git a/py311/lib/python3.11/site-packages/botocore/retries/base.py b/py311/lib/python3.11/site-packages/botocore/retries/base.py new file mode 100644 index 0000000000000000000000000000000000000000..108bfed6901ae6f21e66a3e45d95176a0a18e8ff --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/retries/base.py @@ -0,0 +1,26 @@ +class BaseRetryBackoff: + def delay_amount(self, context): + """Calculate how long we should delay before retrying. + + :type context: RetryContext + + """ + raise NotImplementedError("delay_amount") + + +class BaseRetryableChecker: + """Base class for determining if a retry should happen. + + This base class checks for specific retryable conditions. + A single retryable checker doesn't necessarily indicate a retry + will happen. It's up to the ``RetryPolicy`` to use its + ``BaseRetryableCheckers`` to make the final decision on whether a retry + should happen. + """ + + def is_retryable(self, context): + """Returns True if retryable, False if not. 
+
+        :type context: RetryContext
+        """
+        raise NotImplementedError("is_retryable")
diff --git a/py311/lib/python3.11/site-packages/botocore/retries/bucket.py b/py311/lib/python3.11/site-packages/botocore/retries/bucket.py
new file mode 100644
index 0000000000000000000000000000000000000000..09d33c77d0afb5ccf8a76af8f5316bed9d292c6b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retries/bucket.py
@@ -0,0 +1,115 @@
+"""This module implements token buckets used for client side throttling."""
+
+import threading
+import time
+
+from botocore.exceptions import CapacityNotAvailableError
+
+
+class Clock:
+    def __init__(self):
+        pass
+
+    def sleep(self, amount):
+        time.sleep(amount)
+
+    def current_time(self):
+        return time.time()
+
+
+class TokenBucket:
+    _MIN_RATE = 0.5
+
+    def __init__(self, max_rate, clock, min_rate=_MIN_RATE):
+        self._fill_rate = None
+        self._max_capacity = None
+        self._current_capacity = 0
+        self._clock = clock
+        self._last_timestamp = None
+        self._min_rate = min_rate
+        self._lock = threading.Lock()
+        self._new_fill_rate_condition = threading.Condition(self._lock)
+        self.max_rate = max_rate
+
+    @property
+    def max_rate(self):
+        return self._fill_rate
+
+    @max_rate.setter
+    def max_rate(self, value):
+        with self._new_fill_rate_condition:
+            # Before we can change the rate we need to fill any pending
+            # tokens we might have based on the current rate. If we don't
+            # do this it means everything since the last recorded timestamp
+            # will accumulate at the rate we're about to set which isn't
+            # correct.
+            self._refill()
+            self._fill_rate = max(value, self._min_rate)
+            if value >= 1:
+                self._max_capacity = value
+            else:
+                self._max_capacity = 1
+            # If we're scaling down, we also can't have a capacity that's
+            # more than our max_capacity.
+            self._current_capacity = min(
+                self._current_capacity, self._max_capacity
+            )
+            self._new_fill_rate_condition.notify()
+
+    @property
+    def max_capacity(self):
+        return self._max_capacity
+
+    @property
+    def available_capacity(self):
+        return self._current_capacity
+
+    def acquire(self, amount=1, block=True):
+        """Acquire token or return amount of time until next token available.
+
+        If block is True, then this method will block until there's sufficient
+        capacity to acquire the desired amount.
+
+        If block is False, then this method will return True if capacity
+        was successfully acquired and raise CapacityNotAvailableError
+        otherwise.
+
+        """
+        with self._new_fill_rate_condition:
+            return self._acquire(amount=amount, block=block)
+
+    def _acquire(self, amount, block):
+        self._refill()
+        if amount <= self._current_capacity:
+            self._current_capacity -= amount
+            return True
+        else:
+            if not block:
+                raise CapacityNotAvailableError()
+            # Not enough capacity.
+            sleep_amount = self._sleep_amount(amount)
+            while sleep_amount > 0:
+                # Until python3.2, wait() always returned None so we can't
+                # tell if a timeout occurred waiting on the cond var.
+                # Because of this we'll unconditionally call _refill().
+                # The downside to this is that if we were woken up via
+                # a notify(), we end up calling _refill() an extra,
+                # unnecessary time.
+                self._new_fill_rate_condition.wait(sleep_amount)
+                self._refill()
+                sleep_amount = self._sleep_amount(amount)
+            self._current_capacity -= amount
+            return True
+
+    def _sleep_amount(self, amount):
+        return (amount - self._current_capacity) / self._fill_rate
+
+    def _refill(self):
+        timestamp = self._clock.current_time()
+        if self._last_timestamp is None:
+            self._last_timestamp = timestamp
+            return
+        current_capacity = self._current_capacity
+        fill_amount = (timestamp - self._last_timestamp) * self._fill_rate
+        new_capacity = min(self._max_capacity, current_capacity + fill_amount)
+        self._current_capacity = new_capacity
+        self._last_timestamp = timestamp
diff --git a/py311/lib/python3.11/site-packages/botocore/retries/quota.py b/py311/lib/python3.11/site-packages/botocore/retries/quota.py
new file mode 100644
index 0000000000000000000000000000000000000000..f03942912a16aafcd964f0f2c89c7732035ab193
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retries/quota.py
@@ -0,0 +1,54 @@
+"""Retry quota implementation."""
+
+import threading
+
+
+class RetryQuota:
+    INITIAL_CAPACITY = 500
+
+    def __init__(self, initial_capacity=INITIAL_CAPACITY, lock=None):
+        self._max_capacity = initial_capacity
+        self._available_capacity = initial_capacity
+        if lock is None:
+            lock = threading.Lock()
+        self._lock = lock
+
+    def acquire(self, capacity_amount):
+        """Attempt to acquire a certain amount of capacity.
+
+        If there is not a sufficient amount of capacity available, ``False``
+        is returned. Otherwise, ``True`` is returned, which indicates that
+        capacity was successfully allocated.
+
+        """
+        # The acquire() is only called when we encounter a retryable
+        # response so we aren't worried about locking the entire method.
+        with self._lock:
+            if capacity_amount > self._available_capacity:
+                return False
+            self._available_capacity -= capacity_amount
+            return True
+
+    def release(self, capacity_amount):
+        """Release capacity back to the retry quota.
+
+        The capacity being released will be truncated if necessary
+        to ensure the max capacity is never exceeded.
+
+        """
+        # Implementation note: The release() method is called as part
+        # of the "after-call" event, which means it gets invoked for
+        # every API call. In the common case where the request is
+        # successful and we're at full capacity, we can avoid locking.
+        # We can't exceed max capacity so there's no work we have to do.
+        if self._max_capacity == self._available_capacity:
+            return
+        with self._lock:
+            amount = min(
+                self._max_capacity - self._available_capacity, capacity_amount
+            )
+            self._available_capacity += amount
+
+    @property
+    def available_capacity(self):
+        return self._available_capacity
diff --git a/py311/lib/python3.11/site-packages/botocore/retries/special.py b/py311/lib/python3.11/site-packages/botocore/retries/special.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b782601da6040cbbb19a5c8af4fd4de939cc0db
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retries/special.py
@@ -0,0 +1,51 @@
+"""Special cased retries.
+
+These are additional retry cases we still have to handle from the legacy
+retry handler. They don't make sense as part of the standard mode retry
+module. Ideally we should be able to remove this module.
+
+"""
+
+import logging
+from binascii import crc32
+
+from botocore.retries.base import BaseRetryableChecker
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: This is an ideal candidate for the retryable trait once that's
+# available.
+class RetryIDPCommunicationError(BaseRetryableChecker):
+    _SERVICE_NAME = 'sts'
+
+    def is_retryable(self, context):
+        service_name = context.operation_model.service_model.service_name
+        if service_name != self._SERVICE_NAME:
+            return False
+        error_code = context.get_error_code()
+        return error_code == 'IDPCommunicationError'
+
+
+class RetryDDBChecksumError(BaseRetryableChecker):
+    _CHECKSUM_HEADER = 'x-amz-crc32'
+    _SERVICE_NAME = 'dynamodb'
+
+    def is_retryable(self, context):
+        service_name = context.operation_model.service_model.service_name
+        if service_name != self._SERVICE_NAME:
+            return False
+        if context.http_response is None:
+            return False
+        checksum = context.http_response.headers.get(self._CHECKSUM_HEADER)
+        if checksum is None:
+            return False
+        actual_crc32 = crc32(context.http_response.content) & 0xFFFFFFFF
+        if actual_crc32 != int(checksum):
+            logger.debug(
+                "DynamoDB crc32 checksum does not match, "
+                "expected: %s, actual: %s",
+                checksum,
+                actual_crc32,
+            )
+            return True
diff --git a/py311/lib/python3.11/site-packages/botocore/retries/standard.py b/py311/lib/python3.11/site-packages/botocore/retries/standard.py
new file mode 100644
index 0000000000000000000000000000000000000000..8801530b000df3d371fed6c15b00a067c9045995
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retries/standard.py
@@ -0,0 +1,532 @@
+"""Standard retry behavior.
+
+This contains the default standard retry behavior.
+It provides consistent behavior with other AWS SDKs.
+
+The key base classes used for retries:
+
+    * ``BaseRetryableChecker`` - Use to check a specific condition that
+      indicates a retry should happen. This can include things like
+      max attempts, HTTP status code checks, error code checks etc.
+    * ``RetryBackoff`` - Use to determine how long we should backoff until
+      we retry a request. This is the class that will implement delay such
+      as exponential backoff.
+    * ``RetryPolicy`` - Main class that determines if a retry should
+      happen. It can combine data from various BaseRetryableCheckers
+      to make a final call as to whether or not a retry should happen.
+      It then uses a ``BaseRetryBackoff`` to determine how long to delay.
+    * ``RetryHandler`` - The bridge between botocore's event system
+      used by endpoint.py to manage retries and the interfaces defined
+      in this module.
+
+This allows us to define an API that has minimal coupling to the
+event-based API used by botocore.
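+
+A typical wiring (a sketch; ``register_retry_handler`` is defined
+below)::
+
+    handler = register_retry_handler(client, max_attempts=3)
+
+which hooks the standard retry logic into the client's ``needs-retry``
+and ``after-call`` events.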
+ +""" + +import logging +import random + +from botocore.exceptions import ( + ConnectionError, + ConnectTimeoutError, + HTTPClientError, + ReadTimeoutError, +) +from botocore.retries import quota, special +from botocore.retries.base import BaseRetryableChecker, BaseRetryBackoff + +DEFAULT_MAX_ATTEMPTS = 3 +logger = logging.getLogger(__name__) + + +def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS): + retry_quota = RetryQuotaChecker(quota.RetryQuota()) + + service_id = client.meta.service_model.service_id + service_event_name = service_id.hyphenize() + client.meta.events.register( + f'after-call.{service_event_name}', retry_quota.release_retry_quota + ) + + handler = RetryHandler( + retry_policy=RetryPolicy( + retry_checker=StandardRetryConditions(max_attempts=max_attempts), + retry_backoff=ExponentialBackoff(), + ), + retry_event_adapter=RetryEventAdapter(), + retry_quota=retry_quota, + ) + + unique_id = f'retry-config-{service_event_name}' + client.meta.events.register( + f'needs-retry.{service_event_name}', + handler.needs_retry, + unique_id=unique_id, + ) + return handler + + +class RetryHandler: + """Bridge between botocore's event system and this module. + + This class is intended to be hooked to botocore's event system + as an event handler. + """ + + def __init__(self, retry_policy, retry_event_adapter, retry_quota): + self._retry_policy = retry_policy + self._retry_event_adapter = retry_event_adapter + self._retry_quota = retry_quota + + def needs_retry(self, **kwargs): + """Connect as a handler to the needs-retry event.""" + retry_delay = None + context = self._retry_event_adapter.create_retry_context(**kwargs) + if self._retry_policy.should_retry(context): + # Before we can retry we need to ensure we have sufficient + # capacity in our retry quota. + if self._retry_quota.acquire_retry_quota(context): + retry_delay = self._retry_policy.compute_retry_delay(context) + logger.debug( + "Retry needed, retrying request after delay of: %s", + retry_delay, + ) + else: + logger.debug( + "Retry needed but retry quota reached, " + "not retrying request." + ) + else: + logger.debug("Not retrying request.") + self._retry_event_adapter.adapt_retry_response_from_context(context) + return retry_delay + + +class RetryEventAdapter: + """Adapter to existing retry interface used in the endpoints layer. + + This existing interface for determining if a retry needs to happen + is event based and used in ``botocore.endpoint``. The interface has + grown organically over the years and could use some cleanup. This + adapter converts that interface into the interface used by the + new retry strategies. + + """ + + def create_retry_context(self, **kwargs): + """Create context based on needs-retry kwargs.""" + response = kwargs['response'] + if response is None: + # If response is None it means that an exception was raised + # because we never received a response from the service. This + # could be something like a ConnectionError we get from our + # http layer. + http_response = None + parsed_response = None + else: + http_response, parsed_response = response + # This provides isolation between the kwargs emitted in the + # needs-retry event, and what this module uses to check for + # retries. 
+        context = RetryContext(
+            attempt_number=kwargs['attempts'],
+            operation_model=kwargs['operation'],
+            http_response=http_response,
+            parsed_response=parsed_response,
+            caught_exception=kwargs['caught_exception'],
+            request_context=kwargs['request_dict']['context'],
+        )
+        return context
+
+    def adapt_retry_response_from_context(self, context):
+        """Modify the response passed back to the user based on the context."""
+        # This will mutate attributes that are returned back to the end
+        # user. We do it this way so that all the various retry classes
+        # don't mutate any input parameters from the needs-retry event.
+        metadata = context.get_retry_metadata()
+        if context.parsed_response is not None:
+            context.parsed_response.setdefault('ResponseMetadata', {}).update(
+                metadata
+            )
+
+
+# Implementation note: this is meant to encapsulate all the misc stuff
+# that gets sent in the needs-retry event. This is mapped so that params
+# are more clear and explicit.
+class RetryContext:
+    """Normalize a response that we use to check if a retry should occur.
+
+    This class smoothes over the different types of responses we may get
+    from a service including:
+
+    * A modeled error response from the service that contains a service
+      code and error message.
+    * A raw HTTP response that doesn't contain service protocol specific
+      error keys.
+    * An exception received while attempting to retrieve a response.
+      This could be a ConnectionError we receive from our HTTP layer which
+      could represent that we weren't able to receive a response from
+      the service.
+
+    This class guarantees that at least one of the above attributes will be
+    non None.
+
+    This class is meant to provide a read-only view into the properties
+    associated with a possible retryable response. None of the properties
+    are meant to be modified directly.
+
+    """
+
+    def __init__(
+        self,
+        attempt_number,
+        operation_model=None,
+        parsed_response=None,
+        http_response=None,
+        caught_exception=None,
+        request_context=None,
+    ):
+        # 1-based attempt number.
+        self.attempt_number = attempt_number
+        self.operation_model = operation_model
+        # This is the parsed response dictionary we get from parsing
+        # the HTTP response from the service.
+        self.parsed_response = parsed_response
+        # This is an instance of botocore.awsrequest.AWSResponse.
+        self.http_response = http_response
+        # This is a subclass of Exception that will be non None if
+        # an exception was raised while trying to retrieve a response.
+        self.caught_exception = caught_exception
+        # This is the request context dictionary that's added to the
+        # request dict. This is used to store any additional state
+        # about the request. We use this for storing retry quota
+        # capacity.
+        if request_context is None:
+            request_context = {}
+        self.request_context = request_context
+        self._retry_metadata = {}
+
+    # These are misc helper methods to avoid duplication in the various
+    # checkers.
+    def get_error_code(self):
+        """Check if there was a parsed response with an error code.
+
+        If we could not find any error codes, ``None`` is returned.
+
+        """
+        if self.parsed_response is None:
+            return
+        error = self.parsed_response.get('Error', {})
+        if not isinstance(error, dict):
+            return
+        return error.get('Code')
+
+    def add_retry_metadata(self, **kwargs):
+        """Add key/value pairs to the retry metadata.
+
+        This allows any objects during the retry process to add
+        metadata about any checks/validations that happened.
+
+        This gets added to the response metadata in the retry handler.
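+
+        For example, ``MaxAttemptsChecker`` adds
+        ``MaxAttemptsReached=True`` once the maximum number of attempts
+        has been reached.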
+
+        """
+        self._retry_metadata.update(**kwargs)
+
+    def get_retry_metadata(self):
+        return self._retry_metadata.copy()
+
+
+class RetryPolicy:
+    def __init__(self, retry_checker, retry_backoff):
+        self._retry_checker = retry_checker
+        self._retry_backoff = retry_backoff
+
+    def should_retry(self, context):
+        return self._retry_checker.is_retryable(context)
+
+    def compute_retry_delay(self, context):
+        return self._retry_backoff.delay_amount(context)
+
+
+class ExponentialBackoff(BaseRetryBackoff):
+    _BASE = 2
+    _MAX_BACKOFF = 20
+
+    def __init__(self, max_backoff=20, random=random.random):
+        self._base = self._BASE
+        self._max_backoff = max_backoff
+        self._random = random
+
+    def delay_amount(self, context):
+        """Calculates delay based on exponential backoff.
+
+        This class implements truncated binary exponential backoff
+        with jitter::
+
+            t_i = rand(0, 1) * min(2 ** attempt, MAX_BACKOFF)
+
+        where ``i`` is the request attempt (0 based).
+
+        """
+        # The context.attempt_number is a 1-based value, but we have
+        # to calculate the delay based on a 0-based value. We
+        # want the first delay to just be ``rand(0, 1)``.
+        return self._random() * min(
+            (self._base ** (context.attempt_number - 1)),
+            self._max_backoff,
+        )
+
+
+class MaxAttemptsChecker(BaseRetryableChecker):
+    def __init__(self, max_attempts):
+        self._max_attempts = max_attempts
+
+    def is_retryable(self, context):
+        under_max_attempts = context.attempt_number < self._max_attempts
+        retries_context = context.request_context.get('retries')
+        if retries_context:
+            retries_context['max'] = max(
+                retries_context.get('max', 0), self._max_attempts
+            )
+        if not under_max_attempts:
+            logger.debug("Max attempts of %s reached.", self._max_attempts)
+            context.add_retry_metadata(MaxAttemptsReached=True)
+        return under_max_attempts
+
+
+class TransientRetryableChecker(BaseRetryableChecker):
+    _TRANSIENT_ERROR_CODES = [
+        'RequestTimeout',
+        'RequestTimeoutException',
+        'PriorRequestNotComplete',
+    ]
+    _TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
+    _TRANSIENT_EXCEPTION_CLS = (
+        ConnectionError,
+        HTTPClientError,
+    )
+
+    def __init__(
+        self,
+        transient_error_codes=None,
+        transient_status_codes=None,
+        transient_exception_cls=None,
+    ):
+        if transient_error_codes is None:
+            transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
+        if transient_status_codes is None:
+            transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
+        if transient_exception_cls is None:
+            transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
+        self._transient_error_codes = transient_error_codes
+        self._transient_status_codes = transient_status_codes
+        self._transient_exception_cls = transient_exception_cls
+
+    def is_retryable(self, context):
+        if context.get_error_code() in self._transient_error_codes:
+            return True
+        if context.http_response is not None:
+            if (
+                context.http_response.status_code
+                in self._transient_status_codes
+            ):
+                return True
+        if context.caught_exception is not None:
+            return isinstance(
+                context.caught_exception, self._transient_exception_cls
+            )
+        return False
+
+
+class ThrottledRetryableChecker(BaseRetryableChecker):
+    # This is the union of all error codes we've seen that represent
+    # a throttled error.
+    _THROTTLED_ERROR_CODES = [
+        'Throttling',
+        'ThrottlingException',
+        'ThrottledException',
+        'RequestThrottledException',
+        'TooManyRequestsException',
+        'ProvisionedThroughputExceededException',
+        'TransactionInProgressException',
+        'RequestLimitExceeded',
+        'BandwidthLimitExceeded',
+        'LimitExceededException',
+        'RequestThrottled',
+        'SlowDown',
+        'PriorRequestNotComplete',
+        'EC2ThrottledException',
+    ]
+
+    def __init__(self, throttled_error_codes=None):
+        if throttled_error_codes is None:
+            throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
+        self._throttled_error_codes = throttled_error_codes
+
+    def is_retryable(self, context):
+        # Only the error code from a parsed service response is used
+        # to determine if the response is a throttled response.
+        return context.get_error_code() in self._throttled_error_codes
+
+
+class ModeledRetryableChecker(BaseRetryableChecker):
+    """Check if an error has been modeled as retryable."""
+
+    def __init__(self):
+        self._error_detector = ModeledRetryErrorDetector()
+
+    def is_retryable(self, context):
+        error_code = context.get_error_code()
+        if error_code is None:
+            return False
+        return self._error_detector.detect_error_type(context) is not None
+
+
+class ModeledRetryErrorDetector:
+    """Checks whether or not an error is a modeled retryable error."""
+
+    # These are the return values from the detect_error_type() method.
+    TRANSIENT_ERROR = 'TRANSIENT_ERROR'
+    THROTTLING_ERROR = 'THROTTLING_ERROR'
+    # This class is lower level than ModeledRetryableChecker, which
+    # implements BaseRetryableChecker. This object allows you to distinguish
+    # between the various types of retryable errors.
+
+    def detect_error_type(self, context):
+        """Detect the error type associated with an error code and model.
+
+        This will either return:
+
+        * ``self.TRANSIENT_ERROR`` - If the error is a transient error
+        * ``self.THROTTLING_ERROR`` - If the error is a throttling error
+        * ``None`` - If the error is neither type of error.
+
+        """
+        error_code = context.get_error_code()
+        op_model = context.operation_model
+        if op_model is None or not op_model.error_shapes:
+            return
+        for shape in op_model.error_shapes:
+            if shape.metadata.get('retryable') is not None:
+                # Check if this error code matches the shape. This can
+                # be either by name or by a modeled error code.
+                error_code_to_check = (
+                    shape.metadata.get('error', {}).get('code') or shape.name
+                )
+                if error_code == error_code_to_check:
+                    if shape.metadata['retryable'].get('throttling'):
+                        return self.THROTTLING_ERROR
+                    return self.TRANSIENT_ERROR
+
+
+class ThrottlingErrorDetector:
+    def __init__(self, retry_event_adapter):
+        self._modeled_error_detector = ModeledRetryErrorDetector()
+        self._fixed_error_code_detector = ThrottledRetryableChecker()
+        self._retry_event_adapter = retry_event_adapter
+
+    # This expects the kwargs from needs-retry to be passed through.
+    def is_throttling_error(self, **kwargs):
+        context = self._retry_event_adapter.create_retry_context(**kwargs)
+        if self._fixed_error_code_detector.is_retryable(context):
+            return True
+        error_type = self._modeled_error_detector.detect_error_type(context)
+        return error_type == self._modeled_error_detector.THROTTLING_ERROR
+
+
+class StandardRetryConditions(BaseRetryableChecker):
+    """Concrete class that implements the standard retry policy checks.
+
+    Specifically:
+
+        not max_attempts and (transient or throttled or modeled_retry)
+
+    """
+
+    def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
+        # Note: This class is for convenience so you can have the
+        # standard retry condition in a single class.
+        self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
+        self._additional_checkers = OrRetryChecker(
+            [
+                TransientRetryableChecker(),
+                ThrottledRetryableChecker(),
+                ModeledRetryableChecker(),
+                OrRetryChecker(
+                    [
+                        special.RetryIDPCommunicationError(),
+                        special.RetryDDBChecksumError(),
+                    ]
+                ),
+            ]
+        )
+
+    def is_retryable(self, context):
+        return self._max_attempts_checker.is_retryable(
+            context
+        ) and self._additional_checkers.is_retryable(context)
+
+
+class OrRetryChecker(BaseRetryableChecker):
+    def __init__(self, checkers):
+        self._checkers = checkers
+
+    def is_retryable(self, context):
+        return any(checker.is_retryable(context) for checker in self._checkers)
+
+
+class RetryQuotaChecker:
+    _RETRY_COST = 5
+    _NO_RETRY_INCREMENT = 1
+    _TIMEOUT_RETRY_REQUEST = 10
+    _TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
+
+    # Implementation note: We're not making this a BaseRetryableChecker
+    # because this isn't just a check if we can retry. This also changes
+    # state so we have to be careful about when/how we call this. Making it
+    # a BaseRetryableChecker implies you can call .is_retryable(context)
+    # as many times as you want and not affect anything.
+
+    def __init__(self, quota):
+        self._quota = quota
+        # This tracks the last amount of capacity acquired.
+        self._last_amount_acquired = None
+
+    def acquire_retry_quota(self, context):
+        if self._is_timeout_error(context):
+            capacity_amount = self._TIMEOUT_RETRY_REQUEST
+        else:
+            capacity_amount = self._RETRY_COST
+        success = self._quota.acquire(capacity_amount)
+        if success:
+            # We add the capacity amount to the request context so we know
+            # how much to release later. The capacity amount can vary based
+            # on the error.
+            context.request_context['retry_quota_capacity'] = capacity_amount
+            return True
+        context.add_retry_metadata(RetryQuotaReached=True)
+        return False
+
+    def _is_timeout_error(self, context):
+        return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
+
+    # This is intended to be hooked up to ``after-call``.
+    def release_retry_quota(self, context, http_response, **kwargs):
+        # There are three possible options.
+        # 1. The HTTP response did not have a 2xx response. In that case we
+        #    give no quota back.
+        # 2. The HTTP request was successful and was never retried. In
+        #    that case we give _NO_RETRY_INCREMENT back.
+        # 3. The API call had retries, and we eventually receive an HTTP
+        #    response with a 2xx status code. In that case we give back
+        #    whatever quota was associated with the last acquisition.
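+        # For example (a hypothetical flow): a request that failed with
+        # a timeout acquires 10 units for its retry; if a 2xx response
+        # eventually arrives, those same 10 units are released back.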
+        if http_response is None:
+            return
+        status_code = http_response.status_code
+        if 200 <= status_code < 300:
+            if 'retry_quota_capacity' not in context:
+                self._quota.release(self._NO_RETRY_INCREMENT)
+            else:
+                capacity_amount = context['retry_quota_capacity']
+                self._quota.release(capacity_amount)
diff --git a/py311/lib/python3.11/site-packages/botocore/retries/throttling.py b/py311/lib/python3.11/site-packages/botocore/retries/throttling.py
new file mode 100644
index 0000000000000000000000000000000000000000..34ab417299767553718f9070dcf8fecad4dbe551
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retries/throttling.py
@@ -0,0 +1,55 @@
+from collections import namedtuple
+
+CubicParams = namedtuple('CubicParams', ['w_max', 'k', 'last_fail'])
+
+
+class CubicCalculator:
+    _SCALE_CONSTANT = 0.4
+    _BETA = 0.7
+
+    def __init__(
+        self,
+        starting_max_rate,
+        start_time,
+        scale_constant=_SCALE_CONSTANT,
+        beta=_BETA,
+    ):
+        self._w_max = starting_max_rate
+        self._scale_constant = scale_constant
+        self._beta = beta
+        self._k = self._calculate_zero_point()
+        self._last_fail = start_time
+
+    def _calculate_zero_point(self):
+        scaled_value = (self._w_max * (1 - self._beta)) / self._scale_constant
+        k = scaled_value ** (1 / 3.0)
+        return k
+
+    def success_received(self, timestamp):
+        dt = timestamp - self._last_fail
+        new_rate = self._scale_constant * (dt - self._k) ** 3 + self._w_max
+        return new_rate
+
+    def error_received(self, current_rate, timestamp):
+        # Consider not having this be the current measured rate.
+
+        # We have a new max rate, which is the current rate we were sending
+        # at when we received an error response.
+        self._w_max = current_rate
+        self._k = self._calculate_zero_point()
+        self._last_fail = timestamp
+        return current_rate * self._beta
+
+    def get_params_snapshot(self):
+        """Return a read-only object of the current cubic parameters.
+
+        These parameters are intended to be used for debug/troubleshooting
+        purposes. This object is a read-only snapshot and cannot be used
+        to modify the behavior of the CUBIC calculations.
+
+        New parameters may be added to this object in the future.
+
+        """
+        return CubicParams(
+            w_max=self._w_max, k=self._k, last_fail=self._last_fail
+        )
diff --git a/py311/lib/python3.11/site-packages/botocore/retryhandler.py b/py311/lib/python3.11/site-packages/botocore/retryhandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2eed1d9d37607b5fa6961ec523d72aceec994aa
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/retryhandler.py
@@ -0,0 +1,416 @@
+# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
+# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+ +import functools +import logging +import random +from binascii import crc32 + +from botocore.exceptions import ( + ChecksumError, + ConnectionClosedError, + ConnectionError, + EndpointConnectionError, + ReadTimeoutError, +) + +logger = logging.getLogger(__name__) +# The only supported error for now is GENERAL_CONNECTION_ERROR +# which maps to requests generic ConnectionError. If we're able +# to get more specific exceptions from requests we can update +# this mapping with more specific exceptions. +EXCEPTION_MAP = { + 'GENERAL_CONNECTION_ERROR': [ + ConnectionError, + ConnectionClosedError, + ReadTimeoutError, + EndpointConnectionError, + ], +} + + +def delay_exponential(base, growth_factor, attempts): + """Calculate time to sleep based on exponential function. + + The format is:: + + base * growth_factor ^ (attempts - 1) + + If ``base`` is set to 'rand' then a random number between + 0 and 1 will be used as the base. + Base must be greater than 0, otherwise a ValueError will be + raised. + + """ + if base == 'rand': + base = random.random() + elif base <= 0: + raise ValueError( + f"The 'base' param must be greater than 0, got: {base}" + ) + time_to_sleep = base * (growth_factor ** (attempts - 1)) + return time_to_sleep + + +def create_exponential_delay_function(base, growth_factor): + """Create an exponential delay function based on the attempts. + + This is used so that you only have to pass it the attempts + parameter to calculate the delay. + + """ + return functools.partial( + delay_exponential, base=base, growth_factor=growth_factor + ) + + +def create_retry_handler(config, operation_name=None): + checker = create_checker_from_retry_config( + config, operation_name=operation_name + ) + action = create_retry_action_from_config( + config, operation_name=operation_name + ) + return RetryHandler(checker=checker, action=action) + + +def create_retry_action_from_config(config, operation_name=None): + # The spec has the possibility of supporting per policy + # actions, but right now, we assume this comes from the + # default section, which means that delay functions apply + # for every policy in the retry config (per service). 
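+    # For example (an illustrative config, not taken from a real
+    # service): a delay section of
+    #     {'type': 'exponential', 'base': 'rand', 'growth_factor': 2}
+    # produces delay = rand(0, 1) * 2 ** (attempts - 1) seconds.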
+ delay_config = config['__default__']['delay'] + if delay_config['type'] == 'exponential': + return create_exponential_delay_function( + base=delay_config['base'], + growth_factor=delay_config['growth_factor'], + ) + + +def create_checker_from_retry_config(config, operation_name=None): + checkers = [] + max_attempts = None + retryable_exceptions = [] + if '__default__' in config: + policies = config['__default__'].get('policies', []) + max_attempts = config['__default__']['max_attempts'] + for key in policies: + current_config = policies[key] + checkers.append(_create_single_checker(current_config)) + retry_exception = _extract_retryable_exception(current_config) + if retry_exception is not None: + retryable_exceptions.extend(retry_exception) + if operation_name is not None and config.get(operation_name) is not None: + operation_policies = config[operation_name]['policies'] + for key in operation_policies: + checkers.append(_create_single_checker(operation_policies[key])) + retry_exception = _extract_retryable_exception( + operation_policies[key] + ) + if retry_exception is not None: + retryable_exceptions.extend(retry_exception) + if len(checkers) == 1: + # Don't need to use a MultiChecker + return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts) + else: + multi_checker = MultiChecker(checkers) + return MaxAttemptsDecorator( + multi_checker, + max_attempts=max_attempts, + retryable_exceptions=tuple(retryable_exceptions), + ) + + +def _create_single_checker(config): + if 'response' in config['applies_when']: + return _create_single_response_checker( + config['applies_when']['response'] + ) + elif 'socket_errors' in config['applies_when']: + return ExceptionRaiser() + + +def _create_single_response_checker(response): + if 'service_error_code' in response: + checker = ServiceErrorCodeChecker( + status_code=response['http_status_code'], + error_code=response['service_error_code'], + ) + elif 'http_status_code' in response: + checker = HTTPStatusCodeChecker( + status_code=response['http_status_code'] + ) + elif 'crc32body' in response: + checker = CRC32Checker(header=response['crc32body']) + else: + # TODO: send a signal. + raise ValueError("Unknown retry policy") + return checker + + +def _extract_retryable_exception(config): + applies_when = config['applies_when'] + if 'crc32body' in applies_when.get('response', {}): + return [ChecksumError] + elif 'socket_errors' in applies_when: + exceptions = [] + for name in applies_when['socket_errors']: + exceptions.extend(EXCEPTION_MAP[name]) + return exceptions + + +class RetryHandler: + """Retry handler. + + The retry handler takes two params, ``checker`` object + and an ``action`` object. + + The ``checker`` object must be a callable object and based on a response + and an attempt number, determines whether or not sufficient criteria for + a retry has been met. If this is the case then the ``action`` object + (which also is a callable) determines what needs to happen in the event + of a retry. + + """ + + def __init__(self, checker, action): + self._checker = checker + self._action = action + + def __call__(self, attempts, response, caught_exception, **kwargs): + """Handler for a retry. + + Intended to be hooked up to an event handler (hence the **kwargs), + this will process retries appropriately. 
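+
+        Returns the amount of time (in seconds) to delay before
+        retrying if a retry is needed, and ``None`` otherwise.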
+
+        """
+        checker_kwargs = {
+            'attempt_number': attempts,
+            'response': response,
+            'caught_exception': caught_exception,
+        }
+        if isinstance(self._checker, MaxAttemptsDecorator):
+            retries_context = kwargs['request_dict']['context'].get('retries')
+            checker_kwargs.update({'retries_context': retries_context})
+
+        if self._checker(**checker_kwargs):
+            result = self._action(attempts=attempts)
+            logger.debug("Retry needed, action of: %s", result)
+            return result
+        logger.debug("No retry needed.")
+
+
+class BaseChecker:
+    """Base class for retry checkers.
+
+    Each class is responsible for checking a single criterion that determines
+    whether or not a retry should happen.
+
+    """
+
+    def __call__(self, attempt_number, response, caught_exception):
+        """Determine if retry criteria matches.
+
+        Note that either ``response`` is not None and ``caught_exception`` is
+        None or ``response`` is None and ``caught_exception`` is not None.
+
+        :type attempt_number: int
+        :param attempt_number: The total number of times we've attempted
+            to send the request.
+
+        :param response: The HTTP response (if one was received).
+
+        :type caught_exception: Exception
+        :param caught_exception: Any exception that was caught while trying to
+            send the HTTP request.
+
+        :return: True if the retry criteria matches (and therefore a retry
+            should occur); False if the criteria does not match.
+
+        """
+        # The default implementation allows subclasses to avoid having to
+        # check whether response is None.
+        if response is not None:
+            return self._check_response(attempt_number, response)
+        elif caught_exception is not None:
+            return self._check_caught_exception(
+                attempt_number, caught_exception
+            )
+        else:
+            raise ValueError("Both response and caught_exception are None.")
+
+    def _check_response(self, attempt_number, response):
+        pass
+
+    def _check_caught_exception(self, attempt_number, caught_exception):
+        pass
+
+
+class MaxAttemptsDecorator(BaseChecker):
+    """Allow retries up to a maximum number of attempts.
+
+    This will pass through calls to the decorated retry checker, provided
+    that the number of attempts does not exceed max_attempts. It will
+    also catch any retryable_exceptions passed in. Once max_attempts has
+    been exceeded, False is returned, and any retryable exception that was
+    previously being caught is allowed to propagate.
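+
+    When a ``retries_context`` mapping is passed in, its ``max`` key is
+    updated to reflect the configured maximum number of attempts.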
+ + """ + + def __init__(self, checker, max_attempts, retryable_exceptions=None): + self._checker = checker + self._max_attempts = max_attempts + self._retryable_exceptions = retryable_exceptions + + def __call__( + self, attempt_number, response, caught_exception, retries_context + ): + if retries_context: + retries_context['max'] = max( + retries_context.get('max', 0), self._max_attempts + ) + + should_retry = self._should_retry( + attempt_number, response, caught_exception + ) + if should_retry: + if attempt_number >= self._max_attempts: + # explicitly set MaxAttemptsReached + if response is not None and 'ResponseMetadata' in response[1]: + response[1]['ResponseMetadata']['MaxAttemptsReached'] = ( + True + ) + logger.debug( + "Reached the maximum number of retry attempts: %s", + attempt_number, + ) + return False + else: + return should_retry + else: + return False + + def _should_retry(self, attempt_number, response, caught_exception): + if self._retryable_exceptions and attempt_number < self._max_attempts: + try: + return self._checker( + attempt_number, response, caught_exception + ) + except self._retryable_exceptions as e: + logger.debug( + "retry needed, retryable exception caught: %s", + e, + exc_info=True, + ) + return True + else: + # If we've exceeded the max attempts we just let the exception + # propagate if one has occurred. + return self._checker(attempt_number, response, caught_exception) + + +class HTTPStatusCodeChecker(BaseChecker): + def __init__(self, status_code): + self._status_code = status_code + + def _check_response(self, attempt_number, response): + if response[0].status_code == self._status_code: + logger.debug( + "retry needed: retryable HTTP status code received: %s", + self._status_code, + ) + return True + else: + return False + + +class ServiceErrorCodeChecker(BaseChecker): + def __init__(self, status_code, error_code): + self._status_code = status_code + self._error_code = error_code + + def _check_response(self, attempt_number, response): + if response[0].status_code == self._status_code: + actual_error_code = response[1].get('Error', {}).get('Code') + if actual_error_code == self._error_code: + logger.debug( + "retry needed: matching HTTP status and error code seen: " + "%s, %s", + self._status_code, + self._error_code, + ) + return True + return False + + +class MultiChecker(BaseChecker): + def __init__(self, checkers): + self._checkers = checkers + + def __call__(self, attempt_number, response, caught_exception): + for checker in self._checkers: + checker_response = checker( + attempt_number, response, caught_exception + ) + if checker_response: + return checker_response + return False + + +class CRC32Checker(BaseChecker): + def __init__(self, header): + # The header where the expected crc32 is located. + self._header_name = header + + def _check_response(self, attempt_number, response): + http_response = response[0] + expected_crc = http_response.headers.get(self._header_name) + if expected_crc is None: + logger.debug( + "crc32 check skipped, the %s header is not " + "in the http response.", + self._header_name, + ) + else: + actual_crc32 = crc32(response[0].content) & 0xFFFFFFFF + if not actual_crc32 == int(expected_crc): + logger.debug( + "retry needed: crc32 check failed, expected != actual: " + "%s != %s", + int(expected_crc), + actual_crc32, + ) + raise ChecksumError( + checksum_type='crc32', + expected_checksum=int(expected_crc), + actual_checksum=actual_crc32, + ) + + +class ExceptionRaiser(BaseChecker): + """Raise any caught exceptions. 
+
+    This class will raise any non None ``caught_exception``.
+
+    """
+
+    def _check_caught_exception(self, attempt_number, caught_exception):
+        # This is implementation specific, but this class is useful when
+        # coordinating with the MaxAttemptsDecorator.
+        # The MaxAttemptsDecorator has a list of exceptions it should catch
+        # and retry, but something needs to come along and actually raise the
+        # caught_exception. That's what this class is being used for. If
+        # the MaxAttemptsDecorator is not interested in retrying the exception
+        # then this exception just propagates out past the retry code.
+        raise caught_exception
diff --git a/py311/lib/python3.11/site-packages/botocore/serialize.py b/py311/lib/python3.11/site-packages/botocore/serialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f8d7601c7d3b5cad2d1c83c6262299334087bec
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/serialize.py
@@ -0,0 +1,1277 @@
+# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+"""Protocol input serializers.
+
+This module contains classes that implement input serialization
+for the various AWS protocol types.
+
+These classes essentially take user input and a model object that
+represents what the expected input should look like, and return
+a dictionary that contains the various parts of a request. A few
+high level design decisions:
+
+* Each protocol type maps to a separate class, all inherit from
+  ``Serializer``.
+* The return value for ``serialize_to_request`` (the main entry
+  point) is a dictionary that represents a request. This
+  will have keys like ``url_path``, ``query_string``, etc. This
+  is done so that it's a) easy to test and b) not tied to a
+  particular HTTP library. See the ``serialize_to_request`` docstring
+  for more details.
+
+Unicode
+-------
+
+The input to the serializers should be text (str/unicode), not bytes,
+with the exception of blob types. Those are assumed to be binary,
+and if a str/unicode type is passed in, it will be encoded as utf-8.
+"""
+
+import base64
+import calendar
+import datetime
+import decimal
+import json
+import math
+import re
+import struct
+from xml.etree import ElementTree
+
+from botocore import validate
+from botocore.compat import formatdate
+from botocore.exceptions import ParamValidationError
+from botocore.useragent import register_feature_id
+from botocore.utils import (
+    has_header,
+    is_json_value_header,
+    parse_to_aware_datetime,
+    percent_encode,
+)
+
+# From the spec, the default timestamp format if not specified is iso8601.
+DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+# Same as ISO8601, but with microsecond precision.
+ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
+HOST_PREFIX_RE = re.compile(r"^[A-Za-z0-9\.\-]+$")
+
+TIMESTAMP_PRECISION_DEFAULT = 'default'
+TIMESTAMP_PRECISION_MILLISECOND = 'millisecond'
+TIMESTAMP_PRECISION_OPTIONS = (
+    TIMESTAMP_PRECISION_DEFAULT,
+    TIMESTAMP_PRECISION_MILLISECOND,
+)
+
+
+def create_serializer(
+    protocol_name,
+    include_validation=True,
+    timestamp_precision=TIMESTAMP_PRECISION_DEFAULT,
+):
+    """Create a serializer for the given protocol.
+
+    :param protocol_name: The protocol name to create a serializer for.
+    :type protocol_name: str
+    :param include_validation: Whether to include parameter validation.
+    :type include_validation: bool
+    :param timestamp_precision: Timestamp precision level.
+        - 'default': Microseconds for ISO timestamps, seconds for Unix and RFC
+        - 'millisecond': Millisecond precision (ISO/Unix), seconds for RFC
+    :type timestamp_precision: str
+    :return: A serializer instance for the given protocol.
+    """
+    # TODO: Unknown protocols.
+    serializer = SERIALIZERS[protocol_name](
+        timestamp_precision=timestamp_precision
+    )
+    if include_validation:
+        validator = validate.ParamValidator()
+        serializer = validate.ParamValidationDecorator(validator, serializer)
+    return serializer
+
+
+class Serializer:
+    DEFAULT_METHOD = 'POST'
+    # Clients can change this to a different MutableMapping
+    # (i.e. OrderedDict) if they want. This is used in the
+    # compliance test to match the hash ordering used in the
+    # tests.
+    MAP_TYPE = dict
+    DEFAULT_ENCODING = 'utf-8'
+
+    def __init__(self, timestamp_precision=TIMESTAMP_PRECISION_DEFAULT):
+        if timestamp_precision not in TIMESTAMP_PRECISION_OPTIONS:
+            raise ValueError(
+                f"Invalid timestamp precision found while creating serializer: {timestamp_precision}"
+            )
+        self._timestamp_precision = timestamp_precision
+
+    def serialize_to_request(self, parameters, operation_model):
+        """Serialize parameters into an HTTP request.
+
+        This method takes user-provided parameters and a shape
+        model and serializes the parameters to an HTTP request.
+        More specifically, this method returns information about
+        parts of the HTTP request; it does not enforce a particular
+        interface or standard for an HTTP request. It instead returns
+        a dictionary of:
+
+        * 'url_path'
+        * 'host_prefix'
+        * 'query_string'
+        * 'headers'
+        * 'body'
+        * 'method'
+
+        It is then up to consumers to decide how to map this to a Request
+        object of their HTTP library of choice. Below is an example
+        return value::
+
+            {'body': {'Action': 'OperationName',
+                      'Bar': 'val2',
+                      'Foo': 'val1',
+                      'Version': '2014-01-01'},
+             'headers': {},
+             'method': 'POST',
+             'query_string': '',
+             'host_prefix': 'value.',
+             'url_path': '/'}
+
+        :param parameters: The dictionary input parameters for the
+            operation (i.e. the user input).
+        :param operation_model: The OperationModel object that describes
+            the operation.
+        """
+        raise NotImplementedError("serialize_to_request")
+
+    def _create_default_request(self):
+        # Creates a boilerplate default request dict that subclasses
+        # can use as a starting point.
+        serialized = {
+            'url_path': '/',
+            'query_string': '',
+            'method': self.DEFAULT_METHOD,
+            'headers': {},
+            # An empty body is represented as an empty byte string.
+            'body': b'',
+        }
+        return serialized
+
+    # Some extra utility methods subclasses can use.
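+    # For illustration, given an aware datetime of
+    # 2014-01-01 12:00:00.123456 UTC, the helpers below produce:
+    #
+    #   _timestamp_iso8601, 'default' precision     -> '2014-01-01T12:00:00.123456Z'
+    #   _timestamp_iso8601, 'millisecond' precision -> '2014-01-01T12:00:00.123Z'
+    #   _timestamp_unixtimestamp, 'default'         -> 1388577600
+    #   _timestamp_unixtimestamp, 'millisecond'     -> 1388577600.123
+    #
+    # (Example values only; they are not taken from any service model.)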
+ + def _timestamp_iso8601(self, value): + """Return ISO8601 timestamp with precision based on timestamp_precision.""" + # Smithy's standard is milliseconds, so we truncate the timestamp if the millisecond flag is set to true + if self._timestamp_precision == TIMESTAMP_PRECISION_MILLISECOND: + milliseconds = value.microsecond // 1000 + return ( + value.strftime('%Y-%m-%dT%H:%M:%S') + f'.{milliseconds:03d}Z' + ) + else: + # Otherwise we continue supporting microseconds in iso8601 for legacy reasons + if value.microsecond > 0: + timestamp_format = ISO8601_MICRO + else: + timestamp_format = ISO8601 + return value.strftime(timestamp_format) + + def _timestamp_unixtimestamp(self, value): + """Return unix timestamp with precision based on timestamp_precision.""" + # As of the addition of the precision flag, we support millisecond precision here as well + if self._timestamp_precision == TIMESTAMP_PRECISION_MILLISECOND: + base_timestamp = calendar.timegm(value.timetuple()) + milliseconds = (value.microsecond // 1000) / 1000.0 + return base_timestamp + milliseconds + else: + return int(calendar.timegm(value.timetuple())) + + def _timestamp_rfc822(self, value): + """Return RFC822 timestamp (always second precision - RFC doesn't support sub-second).""" + # RFC 2822 doesn't support sub-second precision, so always use second precision format + if isinstance(value, datetime.datetime): + value = int(calendar.timegm(value.timetuple())) + return formatdate(value, usegmt=True) + + def _convert_timestamp_to_str(self, value, timestamp_format=None): + if timestamp_format is None: + timestamp_format = self.TIMESTAMP_FORMAT + timestamp_format = timestamp_format.lower() + datetime_obj = parse_to_aware_datetime(value) + converter = getattr(self, f'_timestamp_{timestamp_format}') + final_value = converter(datetime_obj) + return final_value + + def _get_serialized_name(self, shape, default_name): + # Returns the serialized name for the shape if it exists. + # Otherwise it will return the passed in default_name. + return shape.serialization.get('name', default_name) + + def _get_base64(self, value): + # Returns the base64-encoded version of value, handling + # both strings and bytes. The returned value is a string + # via the default encoding. + if isinstance(value, str): + value = value.encode(self.DEFAULT_ENCODING) + return base64.b64encode(value).strip().decode(self.DEFAULT_ENCODING) + + def _expand_host_prefix(self, parameters, operation_model): + operation_endpoint = operation_model.endpoint + if ( + operation_endpoint is None + or 'hostPrefix' not in operation_endpoint + ): + return None + + host_prefix_expression = operation_endpoint['hostPrefix'] + if operation_model.input_shape is None: + return host_prefix_expression + input_members = operation_model.input_shape.members + host_labels = [ + member + for member, shape in input_members.items() + if shape.serialization.get('hostLabel') + ] + format_kwargs = {} + bad_labels = [] + for name in host_labels: + param = parameters[name] + if not HOST_PREFIX_RE.match(param): + bad_labels.append(name) + format_kwargs[name] = param + if bad_labels: + raise ParamValidationError( + report=( + f"Invalid value for parameter(s): {', '.join(bad_labels)}. " + "Must contain only alphanumeric characters, hyphen, " + "or period." 
+                )
+            )
+        return host_prefix_expression.format(**format_kwargs)
+
+    def _is_shape_flattened(self, shape):
+        return shape.serialization.get('flattened')
+
+    def _handle_float(self, value):
+        if value == float("Infinity"):
+            value = "Infinity"
+        elif value == float("-Infinity"):
+            value = "-Infinity"
+        elif math.isnan(value):
+            value = "NaN"
+        return value
+
+    def _handle_query_compatible_trait(self, operation_model, serialized):
+        if operation_model.service_model.is_query_compatible:
+            serialized['headers']['x-amzn-query-mode'] = 'true'
+
+
+class QuerySerializer(Serializer):
+    TIMESTAMP_FORMAT = 'iso8601'
+
+    def serialize_to_request(self, parameters, operation_model):
+        shape = operation_model.input_shape
+        serialized = self._create_default_request()
+        serialized['method'] = operation_model.http.get(
+            'method', self.DEFAULT_METHOD
+        )
+        serialized['headers'] = {
+            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
+        }
+        # The query serializer only deals with body params, so
+        # that's what we hand off to the _serialize_* methods.
+        body_params = self.MAP_TYPE()
+        body_params['Action'] = operation_model.name
+        body_params['Version'] = operation_model.metadata['apiVersion']
+        if shape is not None:
+            self._serialize(body_params, parameters, shape)
+        serialized['body'] = body_params
+
+        host_prefix = self._expand_host_prefix(parameters, operation_model)
+        if host_prefix is not None:
+            serialized['host_prefix'] = host_prefix
+
+        return serialized
+
+    def _serialize(self, serialized, value, shape, prefix=''):
+        # serialized: The dict that is incrementally added to with the
+        #     final serialized parameters.
+        # value: The current user input value.
+        # shape: The shape object that describes the structure of the
+        #     input.
+        # prefix: The incrementally built up prefix for the serialized
+        #     key (i.e. Foo.bar.members.1).
+        method = getattr(
+            self,
+            f'_serialize_type_{shape.type_name}',
+            self._default_serialize,
+        )
+        method(serialized, value, shape, prefix=prefix)
+
+    def _serialize_type_structure(self, serialized, value, shape, prefix=''):
+        members = shape.members
+        for key, value in value.items():
+            member_shape = members[key]
+            member_prefix = self._get_serialized_name(member_shape, key)
+            if prefix:
+                member_prefix = f'{prefix}.{member_prefix}'
+            self._serialize(serialized, value, member_shape, member_prefix)
+
+    def _serialize_type_list(self, serialized, value, shape, prefix=''):
+        if not value:
+            # The query protocol serializes empty lists.
+            serialized[prefix] = ''
+            return
+        if self._is_shape_flattened(shape):
+            list_prefix = prefix
+            if shape.member.serialization.get('name'):
+                name = self._get_serialized_name(shape.member, default_name='')
+                # Replace '.Original' with '.{name}'.
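+                # For example, a prefix of 'Foo.Original' with a member
+                # name of 'Item' becomes 'Foo.Item', so elements are
+                # serialized as 'Foo.Item.1', 'Foo.Item.2', and so on.
+                # ('Foo'/'Item' are illustrative names, not model values.)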
+                list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
+        else:
+            list_name = shape.member.serialization.get('name', 'member')
+            list_prefix = f'{prefix}.{list_name}'
+        for i, element in enumerate(value, 1):
+            element_prefix = f'{list_prefix}.{i}'
+            element_shape = shape.member
+            self._serialize(serialized, element, element_shape, element_prefix)
+
+    def _serialize_type_map(self, serialized, value, shape, prefix=''):
+        if self._is_shape_flattened(shape):
+            full_prefix = prefix
+        else:
+            full_prefix = f'{prefix}.entry'
+        template = full_prefix + '.{i}.{suffix}'
+        key_shape = shape.key
+        value_shape = shape.value
+        key_suffix = self._get_serialized_name(key_shape, default_name='key')
+        value_suffix = self._get_serialized_name(value_shape, 'value')
+        for i, key in enumerate(value, 1):
+            key_prefix = template.format(i=i, suffix=key_suffix)
+            value_prefix = template.format(i=i, suffix=value_suffix)
+            self._serialize(serialized, key, key_shape, key_prefix)
+            self._serialize(serialized, value[key], value_shape, value_prefix)
+
+    def _serialize_type_blob(self, serialized, value, shape, prefix=''):
+        # Blob args must be base64 encoded.
+        serialized[prefix] = self._get_base64(value)
+
+    def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
+        serialized[prefix] = self._convert_timestamp_to_str(
+            value, shape.serialization.get('timestampFormat')
+        )
+
+    def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
+        if value:
+            serialized[prefix] = 'true'
+        else:
+            serialized[prefix] = 'false'
+
+    def _default_serialize(self, serialized, value, shape, prefix=''):
+        serialized[prefix] = value
+
+    def _serialize_type_float(self, serialized, value, shape, prefix=''):
+        serialized[prefix] = self._handle_float(value)
+
+    def _serialize_type_double(self, serialized, value, shape, prefix=''):
+        self._serialize_type_float(serialized, value, shape, prefix)
+
+
+class EC2Serializer(QuerySerializer):
+    """EC2 specific customizations to the query protocol serializers.
+
+    The EC2 protocol is almost, but not exactly, the same as the query
+    protocol serializer. This class encapsulates those differences. The
+    model will be marked with a ``protocol`` of ``ec2``, so you don't need
+    to worry about wiring this class up correctly.
+
+    """
+
+    def _get_serialized_name(self, shape, default_name):
+        # Returns the serialized name for the shape if it exists.
+        # Otherwise it will return the passed-in capitalized default_name.
+        if 'queryName' in shape.serialization:
+            return shape.serialization['queryName']
+        elif 'name' in shape.serialization:
+            # A locationName is always capitalized
+            # on input for the ec2 protocol.
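+            # For example, an illustrative locationName of 'instanceId'
+            # would be sent on the wire as 'InstanceId'.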
+ name = shape.serialization['name'] + return name[0].upper() + name[1:] + else: + return default_name + + def _serialize_type_list(self, serialized, value, shape, prefix=''): + for i, element in enumerate(value, 1): + element_prefix = f'{prefix}.{i}' + element_shape = shape.member + self._serialize(serialized, element, element_shape, element_prefix) + + +class JSONSerializer(Serializer): + TIMESTAMP_FORMAT = 'unixtimestamp' + + def serialize_to_request(self, parameters, operation_model): + target = '{}.{}'.format( + operation_model.metadata['targetPrefix'], + operation_model.name, + ) + json_version = operation_model.metadata['jsonVersion'] + serialized = self._create_default_request() + serialized['method'] = operation_model.http.get( + 'method', self.DEFAULT_METHOD + ) + serialized['headers'] = { + 'X-Amz-Target': target, + 'Content-Type': f'application/x-amz-json-{json_version}', + } + self._handle_query_compatible_trait(operation_model, serialized) + + body = self.MAP_TYPE() + input_shape = operation_model.input_shape + if input_shape is not None: + self._serialize(body, parameters, input_shape) + serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING) + + host_prefix = self._expand_host_prefix(parameters, operation_model) + if host_prefix is not None: + serialized['host_prefix'] = host_prefix + + return serialized + + def _serialize(self, serialized, value, shape, key=None): + method = getattr( + self, + f'_serialize_type_{shape.type_name}', + self._default_serialize, + ) + method(serialized, value, shape, key) + + def _serialize_type_structure(self, serialized, value, shape, key): + if shape.is_document_type: + serialized[key] = value + else: + if key is not None: + # If a key is provided, this is a result of a recursive + # call so we need to add a new child dict as the value + # of the passed in serialized dict. We'll then add + # all the structure members as key/vals in the new serialized + # dictionary we just created. + new_serialized = self.MAP_TYPE() + serialized[key] = new_serialized + serialized = new_serialized + members = shape.members + for member_key, member_value in value.items(): + member_shape = members[member_key] + if 'name' in member_shape.serialization: + member_key = member_shape.serialization['name'] + self._serialize( + serialized, member_value, member_shape, member_key + ) + + def _serialize_type_map(self, serialized, value, shape, key): + map_obj = self.MAP_TYPE() + serialized[key] = map_obj + for sub_key, sub_value in value.items(): + self._serialize(map_obj, sub_value, shape.value, sub_key) + + def _serialize_type_list(self, serialized, value, shape, key): + list_obj = [] + serialized[key] = list_obj + for list_item in value: + wrapper = {} + # The JSON list serialization is the only case where we aren't + # setting a key on a dict. We handle this by using + # a __current__ key on a wrapper dict to serialize each + # list item before appending it to the serialized list. 
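+            # For example, serializing the list [1, 2] for an integer member
+            # first produces {'__current__': 1}, then {'__current__': 2};
+            # unwrapping each value yields [1, 2] in the serialized body.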
+            self._serialize(wrapper, list_item, shape.member, "__current__")
+            list_obj.append(wrapper["__current__"])
+
+    def _default_serialize(self, serialized, value, shape, key):
+        serialized[key] = value
+
+    def _serialize_type_timestamp(self, serialized, value, shape, key):
+        serialized[key] = self._convert_timestamp_to_str(
+            value, shape.serialization.get('timestampFormat')
+        )
+
+    def _serialize_type_blob(self, serialized, value, shape, key):
+        serialized[key] = self._get_base64(value)
+
+    def _serialize_type_float(self, serialized, value, shape, prefix=''):
+        if isinstance(value, decimal.Decimal):
+            value = float(value)
+        serialized[prefix] = self._handle_float(value)
+
+    def _serialize_type_double(self, serialized, value, shape, prefix=''):
+        self._serialize_type_float(serialized, value, shape, prefix)
+
+
+class CBORSerializer(Serializer):
+    UNSIGNED_INT_MAJOR_TYPE = 0
+    NEGATIVE_INT_MAJOR_TYPE = 1
+    BLOB_MAJOR_TYPE = 2
+    STRING_MAJOR_TYPE = 3
+    LIST_MAJOR_TYPE = 4
+    MAP_MAJOR_TYPE = 5
+    TAG_MAJOR_TYPE = 6
+    FLOAT_AND_SIMPLE_MAJOR_TYPE = 7
+
+    def _serialize_data_item(self, serialized, value, shape, key=None):
+        # Look the method up with a default of None so that an unknown
+        # shape type reaches the ValueError below rather than raising
+        # AttributeError.
+        method = getattr(self, f'_serialize_type_{shape.type_name}', None)
+        if method is None:
+            raise ValueError(
+                f"Unrecognized C2J type: {shape.type_name}, unable to "
+                f"serialize request"
+            )
+        method(serialized, value, shape, key)
+
+    def _serialize_type_integer(self, serialized, value, shape, key):
+        if value >= 0:
+            major_type = self.UNSIGNED_INT_MAJOR_TYPE
+        else:
+            major_type = self.NEGATIVE_INT_MAJOR_TYPE
+            # The only difference in serializing negative and positive
+            # integers is that for negative ones we set the major type to 1
+            # and set the value to -1 minus the value.
+            value = -1 - value
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            value
+        )
+        initial_byte = self._get_initial_byte(major_type, additional_info)
+        if num_bytes == 0:
+            serialized.extend(initial_byte)
+        else:
+            serialized.extend(initial_byte + value.to_bytes(num_bytes, "big"))
+
+    def _serialize_type_long(self, serialized, value, shape, key):
+        self._serialize_type_integer(serialized, value, shape, key)
+
+    def _serialize_type_blob(self, serialized, value, shape, key):
+        if isinstance(value, str):
+            value = value.encode('utf-8')
+        elif not isinstance(value, (bytes, bytearray)):
+            # We support file-like objects for blobs; these already have been
+            # validated to ensure they have a read method
+            value = value.read()
+        length = len(value)
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            length
+        )
+        initial_byte = self._get_initial_byte(
+            self.BLOB_MAJOR_TYPE, additional_info
+        )
+        if num_bytes == 0:
+            serialized.extend(initial_byte)
+        else:
+            serialized.extend(initial_byte + length.to_bytes(num_bytes, "big"))
+        serialized.extend(value)
+
+    def _serialize_type_string(self, serialized, value, shape, key):
+        encoded = value.encode('utf-8')
+        length = len(encoded)
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            length
+        )
+        initial_byte = self._get_initial_byte(
+            self.STRING_MAJOR_TYPE, additional_info
+        )
+        if num_bytes == 0:
+            serialized.extend(initial_byte + encoded)
+        else:
+            serialized.extend(
+                initial_byte + length.to_bytes(num_bytes, "big") + encoded
+            )
+
+    def _serialize_type_list(self, serialized, value, shape, key):
+        length = len(value)
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            length
+        )
+        initial_byte = self._get_initial_byte(
+            self.LIST_MAJOR_TYPE, additional_info
+        )
+        if num_bytes == 0:
+            serialized.extend(initial_byte)
+        else:
+            serialized.extend(initial_byte + length.to_bytes(num_bytes, "big"))
+        for item in value:
+            self._serialize_data_item(serialized, item, shape.member)
+
+    def _serialize_type_map(self, serialized, value, shape, key):
+        length = len(value)
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            length
+        )
+        initial_byte = self._get_initial_byte(
+            self.MAP_MAJOR_TYPE, additional_info
+        )
+        if num_bytes == 0:
+            serialized.extend(initial_byte)
+        else:
+            serialized.extend(initial_byte + length.to_bytes(num_bytes, "big"))
+        for key_item, item in value.items():
+            self._serialize_data_item(serialized, key_item, shape.key)
+            self._serialize_data_item(serialized, item, shape.value)
+
+    def _serialize_type_structure(self, serialized, value, shape, key):
+        if key is not None:
+            # For nested structures, we need to serialize the key first
+            self._serialize_data_item(serialized, key, shape.key_shape)
+
+        # Remove `None` values from the dictionary
+        value = {k: v for k, v in value.items() if v is not None}
+
+        map_length = len(value)
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            map_length
+        )
+        initial_byte = self._get_initial_byte(
+            self.MAP_MAJOR_TYPE, additional_info
+        )
+        if num_bytes == 0:
+            serialized.extend(initial_byte)
+        else:
+            serialized.extend(
+                initial_byte + map_length.to_bytes(num_bytes, "big")
+            )
+
+        members = shape.members
+        for member_key, member_value in value.items():
+            member_shape = members[member_key]
+            if 'name' in member_shape.serialization:
+                member_key = member_shape.serialization['name']
+            if member_value is not None:
+                self._serialize_type_string(serialized, member_key, None, None)
+                self._serialize_data_item(
+                    serialized, member_value, member_shape
+                )
+
+    def _serialize_type_timestamp(self, serialized, value, shape, key):
+        timestamp = self._convert_timestamp_to_str(value)
+        tag = 1  # Use tag 1 for unix timestamp
+        initial_byte = self._get_initial_byte(self.TAG_MAJOR_TYPE, tag)
+        serialized.extend(initial_byte)  # Tagging the timestamp
+        additional_info, num_bytes = self._get_additional_info_and_num_bytes(
+            timestamp
+        )
+
+        if num_bytes == 0:
+            initial_byte = self._get_initial_byte(
+                self.UNSIGNED_INT_MAJOR_TYPE, timestamp
+            )
+            serialized.extend(initial_byte)
+        else:
+            initial_byte = self._get_initial_byte(
+                self.UNSIGNED_INT_MAJOR_TYPE, additional_info
+            )
+            serialized.extend(
+                initial_byte + timestamp.to_bytes(num_bytes, "big")
+            )
+
+    def _serialize_type_float(self, serialized, value, shape, key):
+        if self._is_special_number(value):
+            serialized.extend(
+                self._get_bytes_for_special_numbers(value)
+            )  # Handle special values like NaN or Infinity
+        else:
+            initial_byte = self._get_initial_byte(
+                self.FLOAT_AND_SIMPLE_MAJOR_TYPE, 26
+            )
+            serialized.extend(initial_byte + struct.pack(">f", value))
+
+    def _serialize_type_double(self, serialized, value, shape, key):
+        if self._is_special_number(value):
+            serialized.extend(
+                self._get_bytes_for_special_numbers(value)
+            )  # Handle special values like NaN or Infinity
+        else:
+            initial_byte = self._get_initial_byte(
+                self.FLOAT_AND_SIMPLE_MAJOR_TYPE, 27
+            )
+            serialized.extend(initial_byte + struct.pack(">d", value))
+
+    def _serialize_type_boolean(self, serialized, value, shape, key):
+        additional_info = 21 if value else 20
+        serialized.extend(
+            self._get_initial_byte(
+                self.FLOAT_AND_SIMPLE_MAJOR_TYPE, additional_info
+            )
+        )
+
+    def _get_additional_info_and_num_bytes(self, value):
+        # Values under 24 can be stored in the initial byte and don't need
+        # further encoding
+        if value < 24:
+            return value, 0
+        # Values between 24 and 255 (inclusive) can be stored in 1 byte and
+        # correspond to additional info 24
+        elif value < 256:
+            return 24, 1
+        # Values up to 65535 can be stored in two bytes and correspond to
+        # additional info 25
+        elif value < 65536:
+            return 25, 2
+        # Values up to 4294967295 can be stored in four bytes and correspond
+        # to additional info 26
+        elif value < 4294967296:
+            return 26, 4
+        # The maximum number of bytes in a definite-length data item is 8,
+        # which corresponds to additional info 27
+        else:
+            return 27, 8
+
+    def _get_initial_byte(self, major_type, additional_info):
+        # The highest order three bits are the major type, so we need to
+        # bitshift the major type by 5
+        major_type_bytes = major_type << 5
+        return (major_type_bytes | additional_info).to_bytes(1, "big")
+
+    def _is_special_number(self, value):
+        return any(
+            [
+                value == float('inf'),
+                value == float('-inf'),
+                math.isnan(value),
+            ]
+        )
+
+    def _get_bytes_for_special_numbers(self, value):
+        additional_info = 25
+        initial_byte = self._get_initial_byte(
+            self.FLOAT_AND_SIMPLE_MAJOR_TYPE, additional_info
+        )
+        if value == float('inf'):
+            return initial_byte + struct.pack(">H", 0x7C00)
+        elif value == float('-inf'):
+            return initial_byte + struct.pack(">H", 0xFC00)
+        elif math.isnan(value):
+            return initial_byte + struct.pack(">H", 0x7E00)
+
+
+class BaseRestSerializer(Serializer):
+    """Base class for rest protocols.
+
+    The only variance between the various rest protocols is the
+    way that the body is serialized. All other aspects (headers, uri, etc.)
+    are the same and logic for serializing those aspects lives here.
+
+    Subclasses must implement the ``_serialize_body_params`` method.
+
+    """
+
+    QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601'
+    HEADER_TIMESTAMP_FORMAT = 'rfc822'
+    # This is a list of known values for the "location" key in the
+    # serialization dict. The location key tells us where on the request
+    # to put the serialized value.
+    KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
+
+    def serialize_to_request(self, parameters, operation_model):
+        serialized = self._create_default_request()
+        serialized['method'] = operation_model.http.get(
+            'method', self.DEFAULT_METHOD
+        )
+        shape = operation_model.input_shape
+
+        host_prefix = self._expand_host_prefix(parameters, operation_model)
+        if host_prefix is not None:
+            serialized['host_prefix'] = host_prefix
+
+        if shape is None:
+            serialized['url_path'] = operation_model.http['requestUri']
+            return serialized
+        shape_members = shape.members
+        # While the ``serialized`` key holds the final serialized request
+        # data, we need interim dicts for the various locations of the
+        # request. We need this for the uri_path_kwargs and the
+        # query_string_kwargs because they are templated, so we need
+        # to gather all the needed data for the string template,
+        # then we render the template. The body_kwargs is needed
+        # because once we've collected them all, we run them through
+        # _serialize_body_params, which for rest-json, creates JSON,
+        # and for rest-xml, will create XML. This is what the
+        # ``partitioned`` dict below is for.
+        partitioned = {
+            'uri_path_kwargs': self.MAP_TYPE(),
+            'query_string_kwargs': self.MAP_TYPE(),
+            'body_kwargs': self.MAP_TYPE(),
+            'headers': self.MAP_TYPE(),
+        }
+        for param_name, param_value in parameters.items():
+            if param_value is None:
+                # Don't serialize any parameter with a None value.
+                continue
+            self._partition_parameters(
+                partitioned, param_name, param_value, shape_members
+            )
+        serialized['url_path'] = self._render_uri_template(
+            operation_model.http['requestUri'], partitioned['uri_path_kwargs']
+        )
+
+        if 'authPath' in operation_model.http:
+            serialized['auth_path'] = self._render_uri_template(
+                operation_model.http['authPath'],
+                partitioned['uri_path_kwargs'],
+            )
+        # Note that we lean on the http implementation to handle the case
+        # where the requestUri path already has query parameters.
+        # The bundled http client, requests, already supports this.
+        serialized['query_string'] = partitioned['query_string_kwargs']
+        if partitioned['headers']:
+            serialized['headers'] = partitioned['headers']
+        self._serialize_payload(
+            partitioned, parameters, serialized, shape, shape_members
+        )
+        self._serialize_content_type(serialized, shape, shape_members)
+
+        return serialized
+
+    def _render_uri_template(self, uri_template, params):
+        # We need to handle two cases::
+        #
+        #     /{Bucket}/foo
+        #     /{Key+}/bar
+        #
+        # A label ending with '+' is greedy. There can only
+        # be one greedy key.
+        encoded_params = {}
+        for template_param in re.findall(r'{(.*?)}', uri_template):
+            if template_param.endswith('+'):
+                encoded_params[template_param] = percent_encode(
+                    params[template_param[:-1]], safe='/~'
+                )
+            else:
+                encoded_params[template_param] = percent_encode(
+                    params[template_param]
+                )
+        return uri_template.format(**encoded_params)
+
+    def _serialize_payload(
+        self, partitioned, parameters, serialized, shape, shape_members
+    ):
+        # partitioned - The user input params partitioned by location.
+        # parameters - The user input params.
+        # serialized - The final serialized request dict.
+        # shape - Describes the expected input shape
+        # shape_members - The members of the input struct shape
+        payload_member = shape.serialization.get('payload')
+        if self._has_streaming_payload(payload_member, shape_members):
+            # If it's streaming, then the body is just the
+            # value of the payload.
+            body_payload = parameters.get(payload_member, b'')
+            body_payload = self._encode_payload(body_payload)
+            serialized['body'] = body_payload
+        elif payload_member is not None:
+            # If there's a payload member, we serialize that
+            # member to the body.
+            body_params = parameters.get(payload_member)
+            if body_params is not None:
+                serialized['body'] = self._serialize_body_params(
+                    body_params, shape_members[payload_member]
+                )
+            else:
+                serialized['body'] = self._serialize_empty_body()
+        elif partitioned['body_kwargs']:
+            serialized['body'] = self._serialize_body_params(
+                partitioned['body_kwargs'], shape
+            )
+        elif self._requires_empty_body(shape):
+            serialized['body'] = self._serialize_empty_body()
+
+    def _serialize_empty_body(self):
+        return b''
+
+    def _serialize_content_type(self, serialized, shape, shape_members):
+        """
+        Some protocols require varied Content-Type headers
+        depending on user input. This allows subclasses to apply
+        this conditionally.
+        """
+        pass
+
+    def _requires_empty_body(self, shape):
+        """
+        Some protocols require a specific body to represent an empty
+        payload. This allows subclasses to apply this conditionally.
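+
+        For example, ``RestJSONSerializer`` below returns ``b'{}'`` from
+        ``_serialize_empty_body`` when any input member targets the body.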
+        """
+        return False
+
+    def _has_streaming_payload(self, payload, shape_members):
+        """Determine if payload is streaming (a blob or string)."""
+        return payload is not None and shape_members[payload].type_name in (
+            'blob',
+            'string',
+        )
+
+    def _encode_payload(self, body):
+        if isinstance(body, str):
+            return body.encode(self.DEFAULT_ENCODING)
+        return body
+
+    def _partition_parameters(
+        self, partitioned, param_name, param_value, shape_members
+    ):
+        # This takes a user-provided input parameter (``param_name`` and
+        # ``param_value``) and figures out where it goes in the request dict.
+        # Some params are HTTP headers, some are used in the URI, some
+        # are in the request body. This method deals with this.
+        member = shape_members[param_name]
+        location = member.serialization.get('location')
+        key_name = member.serialization.get('name', param_name)
+        if location == 'uri':
+            uri_path_value = self._get_uri_and_query_string_value(
+                param_value, member
+            )
+            partitioned['uri_path_kwargs'][key_name] = uri_path_value
+        elif location == 'querystring':
+            if isinstance(param_value, dict):
+                partitioned['query_string_kwargs'].update(param_value)
+            elif member.type_name == 'list':
+                new_param = [
+                    self._get_uri_and_query_string_value(value, member.member)
+                    for value in param_value
+                ]
+                partitioned['query_string_kwargs'][key_name] = new_param
+            else:
+                new_param = self._get_uri_and_query_string_value(
+                    param_value, member
+                )
+                partitioned['query_string_kwargs'][key_name] = new_param
+        elif location == 'header':
+            shape = shape_members[param_name]
+            if not param_value and shape.type_name == 'list':
+                # Empty lists should not be set on the headers
+                return
+            partitioned['headers'][key_name] = self._convert_header_value(
+                shape, param_value
+            )
+        elif location == 'headers':
+            # 'headers' is a bit of an oddball. The ``key_name``
+            # is actually really a prefix for the header names:
+            header_prefix = key_name
+            # The value provided by the user is a dict so we'll be
+            # creating multiple header key/val pairs. The key
+            # name to use for each header is the header_prefix (``key_name``)
+            # plus the key provided by the user.
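+            # For example (illustrative values): a header_prefix of
+            # 'x-amz-meta-' with user input {'color': 'blue'} produces a
+            # single 'x-amz-meta-color: blue' header.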
+ self._do_serialize_header_map( + header_prefix, partitioned['headers'], param_value + ) + else: + partitioned['body_kwargs'][param_name] = param_value + + def _get_uri_and_query_string_value(self, param_value, member): + if member.type_name == 'boolean': + return str(param_value).lower() + elif member.type_name == 'timestamp': + timestamp_format = member.serialization.get( + 'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT + ) + return self._convert_timestamp_to_str( + param_value, timestamp_format + ) + elif member.type_name in ['float', 'double']: + return str(self._handle_float(param_value)) + return param_value + + def _do_serialize_header_map(self, header_prefix, headers, user_input): + for key, val in user_input.items(): + full_key = header_prefix + key + headers[full_key] = val + + def _serialize_body_params(self, params, shape): + raise NotImplementedError('_serialize_body_params') + + def _convert_header_value(self, shape, value): + if shape.type_name == 'timestamp': + datetime_obj = parse_to_aware_datetime(value) + timestamp = calendar.timegm(datetime_obj.utctimetuple()) + timestamp_format = shape.serialization.get( + 'timestampFormat', self.HEADER_TIMESTAMP_FORMAT + ) + return str( + self._convert_timestamp_to_str(timestamp, timestamp_format) + ) + elif shape.type_name == 'list': + if shape.member.type_name == "string": + converted_value = [ + self._escape_header_list_string(v) + for v in value + if v is not None + ] + else: + converted_value = [ + self._convert_header_value(shape.member, v) + for v in value + if v is not None + ] + return ",".join(converted_value) + elif is_json_value_header(shape): + # Serialize with no spaces after separators to save space in + # the header. + return self._get_base64(json.dumps(value, separators=(',', ':'))) + elif shape.type_name == 'boolean': + return str(value).lower() + elif shape.type_name in ['float', 'double']: + return str(self._handle_float(value)) + else: + return str(value) + + def _escape_header_list_string(self, value): + # Escapes a header list string by wrapping it in double quotes if it contains + # a comma or a double quote, and escapes any internal double quotes. + if '"' in value or ',' in value: + return '"' + value.replace('"', '\\"') + '"' + else: + return value + + +class BaseRpcV2Serializer(Serializer): + """Base class for RPCv2 protocols. + + The only variance between the various RPCv2 protocols is the + way that the body is serialized. All other aspects (headers, uri, etc.) + are the same and logic for serializing those aspects lives here. + + Subclasses must implement the ``_serialize_body_params`` and + ``_serialize_headers`` methods. 
+ + """ + + def serialize_to_request(self, parameters, operation_model): + serialized = self._create_default_request() + service_name = operation_model.service_model.metadata['targetPrefix'] + operation_name = operation_model.name + serialized['url_path'] = ( + f'/service/{service_name}/operation/{operation_name}' + ) + + input_shape = operation_model.input_shape + if input_shape is not None: + self._serialize_payload(parameters, serialized, input_shape) + + host_prefix = self._expand_host_prefix(parameters, operation_model) + if host_prefix is not None: + serialized['host_prefix'] = host_prefix + + self._serialize_headers(serialized, operation_model) + + return serialized + + def _serialize_payload(self, parameters, serialized, shape): + body_payload = self._serialize_body_params(parameters, shape) + serialized['body'] = body_payload + + def _serialize_headers(self, serialized, operation_model): + raise NotImplementedError("_serialize_headers") + + def _serialize_body_params(self, parameters, shape): + raise NotImplementedError("_serialize_body_params") + + +class RestJSONSerializer(BaseRestSerializer, JSONSerializer): + def _serialize_empty_body(self): + return b'{}' + + def _requires_empty_body(self, shape): + """ + Serialize an empty JSON object whenever the shape has + members not targeting a location. + """ + for member, val in shape.members.items(): + if 'location' not in val.serialization: + return True + return False + + def _serialize_content_type(self, serialized, shape, shape_members): + """Set Content-Type to application/json for all structured bodies.""" + payload = shape.serialization.get('payload') + if self._has_streaming_payload(payload, shape_members): + # Don't apply content-type to streaming bodies + return + + has_body = serialized['body'] != b'' + has_content_type = has_header('Content-Type', serialized['headers']) + if has_body and not has_content_type: + serialized['headers']['Content-Type'] = 'application/json' + + def _serialize_body_params(self, params, shape): + serialized_body = self.MAP_TYPE() + self._serialize(serialized_body, params, shape) + return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING) + + +class RestXMLSerializer(BaseRestSerializer): + TIMESTAMP_FORMAT = 'iso8601' + + def _serialize_body_params(self, params, shape): + root_name = shape.serialization['name'] + pseudo_root = ElementTree.Element('') + self._serialize(shape, params, pseudo_root, root_name) + real_root = list(pseudo_root)[0] + return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING) + + def _serialize(self, shape, params, xmlnode, name): + method = getattr( + self, + f'_serialize_type_{shape.type_name}', + self._default_serialize, + ) + method(xmlnode, params, shape, name) + + def _serialize_type_structure(self, xmlnode, params, shape, name): + structure_node = ElementTree.SubElement(xmlnode, name) + + self._add_xml_namespace(shape, structure_node) + for key, value in params.items(): + member_shape = shape.members[key] + member_name = member_shape.serialization.get('name', key) + # We need to special case member shapes that are marked as an + # xmlAttribute. Rather than serializing into an XML child node, + # we instead serialize the shape to an XML attribute of the + # *current* node. + if value is None: + # Don't serialize any param whose value is None. + return + if member_shape.serialization.get('xmlAttribute'): + # xmlAttributes must have a serialization name. 
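+                # For example (illustrative): a member whose serialization is
+                # {'name': 'xsi:type', 'xmlAttribute': True} with value 'foo'
+                # becomes xsi:type="foo" on this element instead of a child
+                # node.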
+                xml_attribute_name = member_shape.serialization['name']
+                structure_node.attrib[xml_attribute_name] = value
+                continue
+            self._serialize(member_shape, value, structure_node, member_name)
+
+    def _serialize_type_list(self, xmlnode, params, shape, name):
+        member_shape = shape.member
+        if shape.serialization.get('flattened'):
+            element_name = name
+            list_node = xmlnode
+        else:
+            element_name = member_shape.serialization.get('name', 'member')
+            list_node = ElementTree.SubElement(xmlnode, name)
+        self._add_xml_namespace(shape, list_node)
+        for item in params:
+            self._serialize(member_shape, item, list_node, element_name)
+
+    def _serialize_type_map(self, xmlnode, params, shape, name):
+        # Given the ``name`` of MyMap, and input of {"key1": "val1"}
+        # we serialize this as:
+        #
+        #     <MyMap>
+        #       <entry>
+        #         <key>key1</key>
+        #         <value>val1</value>
+        #       </entry>
+        #     </MyMap>
+        #
+        if not self._is_shape_flattened(shape):
+            node = ElementTree.SubElement(xmlnode, name)
+            self._add_xml_namespace(shape, node)
+
+        for key, value in params.items():
+            sub_node = (
+                ElementTree.SubElement(xmlnode, name)
+                if self._is_shape_flattened(shape)
+                else ElementTree.SubElement(node, 'entry')
+            )
+            key_name = self._get_serialized_name(shape.key, default_name='key')
+            val_name = self._get_serialized_name(
+                shape.value, default_name='value'
+            )
+            self._serialize(shape.key, key, sub_node, key_name)
+            self._serialize(shape.value, value, sub_node, val_name)
+
+    def _serialize_type_boolean(self, xmlnode, params, shape, name):
+        # For scalar types, the 'params' attr is actually just a scalar
+        # value representing the data we need to serialize as a boolean.
+        # It will either be 'true' or 'false'
+        node = ElementTree.SubElement(xmlnode, name)
+        if params:
+            str_value = 'true'
+        else:
+            str_value = 'false'
+        node.text = str_value
+        self._add_xml_namespace(shape, node)
+
+    def _serialize_type_blob(self, xmlnode, params, shape, name):
+        node = ElementTree.SubElement(xmlnode, name)
+        node.text = self._get_base64(params)
+        self._add_xml_namespace(shape, node)
+
+    def _serialize_type_timestamp(self, xmlnode, params, shape, name):
+        node = ElementTree.SubElement(xmlnode, name)
+        node.text = str(
+            self._convert_timestamp_to_str(
+                params, shape.serialization.get('timestampFormat')
+            )
+        )
+        self._add_xml_namespace(shape, node)
+
+    def _serialize_type_float(self, xmlnode, params, shape, name):
+        node = ElementTree.SubElement(xmlnode, name)
+        node.text = str(self._handle_float(params))
+        self._add_xml_namespace(shape, node)
+
+    def _serialize_type_double(self, xmlnode, params, shape, name):
+        self._serialize_type_float(xmlnode, params, shape, name)
+
+    def _default_serialize(self, xmlnode, params, shape, name):
+        node = ElementTree.SubElement(xmlnode, name)
+        node.text = str(params)
+        self._add_xml_namespace(shape, node)
+
+    def _add_xml_namespace(self, shape, structure_node):
+        if 'xmlNamespace' in shape.serialization:
+            namespace_metadata = shape.serialization['xmlNamespace']
+            attribute_name = 'xmlns'
+            if isinstance(namespace_metadata, dict):
+                if namespace_metadata.get('prefix'):
+                    attribute_name += f":{namespace_metadata['prefix']}"
+                structure_node.attrib[attribute_name] = namespace_metadata[
+                    'uri'
+                ]
+            elif isinstance(namespace_metadata, str):
+                structure_node.attrib[attribute_name] = namespace_metadata
+
+
+class RpcV2CBORSerializer(BaseRpcV2Serializer, CBORSerializer):
+    TIMESTAMP_FORMAT = 'unixtimestamp'
+
+    def serialize_to_request(self, parameters, operation_model):
+        register_feature_id('PROTOCOL_RPC_V2_CBOR')
+        return super().serialize_to_request(parameters, operation_model)
+
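+    # A minimal sketch of the CBOR framing implemented by the helpers in
+    # CBORSerializer (values are illustrative): a two-entry map gets the
+    # initial byte 0xA2 (major type 5 << 5, additional info 2), and the
+    # text key 'Name' is framed as 0x64 (major type 3 << 5, length 4)
+    # followed by the UTF-8 bytes b'Name'.
+    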
def _serialize_body_params(self, parameters, input_shape): + body = bytearray() + self._serialize_data_item(body, parameters, input_shape) + return bytes(body) + + def _serialize_headers(self, serialized, operation_model): + serialized['headers']['smithy-protocol'] = 'rpc-v2-cbor' + + if operation_model.has_event_stream_output: + header_val = 'application/vnd.amazon.eventstream' + else: + header_val = 'application/cbor' + self._handle_query_compatible_trait(operation_model, serialized) + + has_body = serialized['body'] != b'' + has_content_type = has_header('Content-Type', serialized['headers']) + + serialized['headers']['Accept'] = header_val + if not has_content_type and has_body: + serialized['headers']['Content-Type'] = header_val + + +SERIALIZERS = { + 'ec2': EC2Serializer, + 'query': QuerySerializer, + 'json': JSONSerializer, + 'rest-json': RestJSONSerializer, + 'rest-xml': RestXMLSerializer, + 'smithy-rpc-v2-cbor': RpcV2CBORSerializer, +} diff --git a/py311/lib/python3.11/site-packages/botocore/session.py b/py311/lib/python3.11/site-packages/botocore/session.py new file mode 100644 index 0000000000000000000000000000000000000000..c5a61b2f8b2b38cd09052edf9b4f002b8c6946e1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/session.py @@ -0,0 +1,1330 @@ +# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +""" +This module contains the main interface to the botocore package, the +Session object. 
+""" + +import copy +import logging +import os +import platform +import socket +import warnings + +import botocore.client +import botocore.configloader +import botocore.credentials +import botocore.tokens +from botocore import ( + UNSIGNED, + __version__, + handlers, + invoke_initializers, + monitoring, + paginate, + retryhandler, + translate, + waiter, +) +from botocore.compat import ( + HAS_CRT, # noqa: F401 + MutableMapping, +) +from botocore.configprovider import ( + BOTOCORE_DEFAUT_SESSION_VARIABLES, + ConfigChainFactory, + ConfiguredEndpointProvider, + ConfigValueStore, + DefaultConfigResolver, + SmartDefaultsConfigStoreFactory, + create_botocore_default_config_mapping, +) +from botocore.context import get_context, with_current_context +from botocore.errorfactory import ClientExceptionsFactory +from botocore.exceptions import ( + ConfigNotFound, + InvalidDefaultsMode, + PartialCredentialsError, + ProfileNotFound, + UnknownServiceError, +) +from botocore.hooks import ( + EventAliaser, + HierarchicalEmitter, + first_non_none_response, +) +from botocore.loaders import create_loader +from botocore.model import ServiceModel +from botocore.parsers import ResponseParserFactory +from botocore.plugin import get_botocore_plugins, load_client_plugins +from botocore.regions import EndpointResolver +from botocore.useragent import UserAgentString, register_feature_id +from botocore.utils import ( + EVENT_ALIASES, + IMDSRegionProvider, + validate_region_name, +) + +logger = logging.getLogger(__name__) + + +class Session: + """ + The Session object collects together useful functionality + from `botocore` as well as important data such as configuration + information and credentials into a single, easy-to-use object. + + :ivar available_profiles: A list of profiles defined in the config + file associated with this session. + :ivar profile: The current profile. + """ + + SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES) + + #: The default format string to use when configuring the botocore logger. + LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + + def __init__( + self, + session_vars=None, + event_hooks=None, + include_builtin_handlers=True, + profile=None, + ): + """ + Create a new Session object. + + :type session_vars: dict + :param session_vars: A dictionary that is used to override some or all + of the environment variables associated with this session. The + key/value pairs defined in this dictionary will override the + corresponding variables defined in ``SESSION_VARIABLES``. + + :type event_hooks: BaseEventHooks + :param event_hooks: The event hooks object to use. If one is not + provided, an event hooks object will be automatically created + for you. + + :type include_builtin_handlers: bool + :param include_builtin_handlers: Indicates whether or not to + automatically register builtin handlers. + + :type profile: str + :param profile: The name of the profile to use for this + session. Note that the profile can only be set when + the session is created. 
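+
+        A minimal example (illustrative profile name)::
+
+            >>> from botocore.session import Session
+            >>> session = Session(profile='dev')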
+ + """ + if event_hooks is None: + self._original_handler = HierarchicalEmitter() + else: + self._original_handler = event_hooks + self._events = EventAliaser(self._original_handler) + if include_builtin_handlers: + self._register_builtin_handlers(self._events) + self.user_agent_name = 'Botocore' + self.user_agent_version = __version__ + self.user_agent_extra = '' + # The _profile attribute is just used to cache the value + # of the current profile to avoid going through the normal + # config lookup process each access time. + self._profile = None + self._config = None + self._credentials = None + self._auth_token = None + self._profile_map = None + # This is a dict that stores per session specific config variable + # overrides via set_config_variable(). + self._session_instance_vars = {} + if profile is not None: + self._session_instance_vars['profile'] = profile + self._client_config = None + self._last_client_region_used = None + self._components = ComponentLocator() + self._internal_components = ComponentLocator() + self._register_components() + self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES) + if session_vars is not None: + self.session_var_map.update(session_vars) + invoke_initializers(self) + + def _register_components(self): + self._register_credential_provider() + self._register_token_provider() + self._register_data_loader() + self._register_endpoint_resolver() + self._register_event_emitter() + self._register_response_parser_factory() + self._register_exceptions_factory() + self._register_config_store() + self._register_monitor() + self._register_default_config_resolver() + self._register_smart_defaults_factory() + self._register_user_agent_creator() + + def _register_event_emitter(self): + self._components.register_component('event_emitter', self._events) + + def _register_token_provider(self): + self._components.lazy_register_component( + 'token_provider', self._create_token_resolver + ) + + def _create_token_resolver(self): + return botocore.tokens.create_token_resolver(self) + + def _register_credential_provider(self): + self._components.lazy_register_component( + 'credential_provider', self._create_credential_resolver + ) + + def _create_credential_resolver(self): + return botocore.credentials.create_credential_resolver( + self, region_name=self._last_client_region_used + ) + + def _register_data_loader(self): + self._components.lazy_register_component( + 'data_loader', + lambda: create_loader(self.get_config_variable('data_path')), + ) + + def _register_endpoint_resolver(self): + def create_default_resolver(): + loader = self.get_component('data_loader') + endpoints, path = loader.load_data_with_path('endpoints') + uses_builtin = loader.is_builtin_path(path) + return EndpointResolver(endpoints, uses_builtin_data=uses_builtin) + + self._internal_components.lazy_register_component( + 'endpoint_resolver', create_default_resolver + ) + + def _register_default_config_resolver(self): + def create_default_config_resolver(): + loader = self.get_component('data_loader') + defaults = loader.load_data('sdk-default-configuration') + return DefaultConfigResolver(defaults) + + self._internal_components.lazy_register_component( + 'default_config_resolver', create_default_config_resolver + ) + + def _register_smart_defaults_factory(self): + def create_smart_defaults_factory(): + default_config_resolver = self._get_internal_component( + 'default_config_resolver' + ) + imds_region_provider = IMDSRegionProvider(session=self) + return SmartDefaultsConfigStoreFactory( + 
default_config_resolver, imds_region_provider
+            )
+
+        self._internal_components.lazy_register_component(
+            'smart_defaults_factory', create_smart_defaults_factory
+        )
+
+    def _register_response_parser_factory(self):
+        self._components.register_component(
+            'response_parser_factory', ResponseParserFactory()
+        )
+
+    def _register_exceptions_factory(self):
+        self._internal_components.register_component(
+            'exceptions_factory', ClientExceptionsFactory()
+        )
+
+    def _register_builtin_handlers(self, events):
+        for spec in handlers.BUILTIN_HANDLERS:
+            if len(spec) == 2:
+                event_name, handler = spec
+                self.register(event_name, handler)
+            else:
+                event_name, handler, register_type = spec
+                if register_type is handlers.REGISTER_FIRST:
+                    self._events.register_first(event_name, handler)
+                elif register_type is handlers.REGISTER_LAST:
+                    self._events.register_last(event_name, handler)
+
+    def _register_config_store(self):
+        config_store_component = ConfigValueStore(
+            mapping=create_botocore_default_config_mapping(self)
+        )
+        self._components.register_component(
+            'config_store', config_store_component
+        )
+
+    def _register_monitor(self):
+        self._internal_components.lazy_register_component(
+            'monitor', self._create_csm_monitor
+        )
+
+    def _register_user_agent_creator(self):
+        uas = UserAgentString.from_environment()
+        self._components.register_component('user_agent_creator', uas)
+
+    def _create_csm_monitor(self):
+        if self.get_config_variable('csm_enabled'):
+            client_id = self.get_config_variable('csm_client_id')
+            host = self.get_config_variable('csm_host')
+            port = self.get_config_variable('csm_port')
+            handler = monitoring.Monitor(
+                adapter=monitoring.MonitorEventAdapter(),
+                publisher=monitoring.SocketPublisher(
+                    socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
+                    host=host,
+                    port=port,
+                    serializer=monitoring.CSMSerializer(
+                        csm_client_id=client_id
+                    ),
+                ),
+            )
+            return handler
+        return None
+
+    def _get_crt_version(self):
+        user_agent_creator = self.get_component('user_agent_creator')
+        return user_agent_creator._crt_version or 'Unknown'
+
+    @property
+    def available_profiles(self):
+        return list(self._build_profile_map().keys())
+
+    def _build_profile_map(self):
+        # This will build the profile map if it has not been created,
+        # otherwise it will return the cached value. The profile map
+        # is a mapping of profile names to the config values for each
+        # profile.
+        if self._profile_map is None:
+            self._profile_map = self.full_config['profiles']
+        return self._profile_map
+
+    @property
+    def profile(self):
+        if self._profile is None:
+            profile = self.get_config_variable('profile')
+            self._profile = profile
+        return self._profile
+
+    def get_config_variable(self, logical_name, methods=None):
+        if methods is not None:
+            return self._get_config_variable_with_custom_methods(
+                logical_name, methods
+            )
+        return self.get_component('config_store').get_config_variable(
+            logical_name
+        )
+
+    def _get_config_variable_with_custom_methods(self, logical_name, methods):
+        # If a custom list of methods was supplied we need to preserve the
+        # behavior with the new system. To do so a new chain that is a copy of
+        # the old one will be constructed, but only with the supplied methods
+        # being added to the chain. This chain will be consulted for a value
+        # and then thrown out. This is not efficient, nor is the methods arg
+        # used in botocore; this is just for backwards compatibility.
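+        # For example (illustrative call): get_config_variable('profile',
+        # methods=('instance', 'env')) consults only the instance and
+        # environment links of the chain and skips the config file.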
+ chain_builder = SubsetChainConfigFactory(session=self, methods=methods) + mapping = create_botocore_default_config_mapping(self) + for name, config_options in self.session_var_map.items(): + config_name, env_vars, default, typecast = config_options + build_chain_config_args = { + 'conversion_func': typecast, + 'default': default, + } + if 'instance' in methods: + build_chain_config_args['instance_name'] = name + if 'env' in methods: + build_chain_config_args['env_var_names'] = env_vars + if 'config' in methods: + build_chain_config_args['config_property_name'] = config_name + mapping[name] = chain_builder.create_config_chain( + **build_chain_config_args + ) + config_store_component = ConfigValueStore(mapping=mapping) + value = config_store_component.get_config_variable(logical_name) + return value + + def set_config_variable(self, logical_name, value): + """Set a configuration variable to a specific value. + + By using this method, you can override the normal lookup + process used in ``get_config_variable`` by explicitly setting + a value. Subsequent calls to ``get_config_variable`` will + use the ``value``. This gives you per-session specific + configuration values. + + :: + >>> # Assume logical name 'foo' maps to env var 'FOO' + >>> os.environ['FOO'] = 'myvalue' + >>> s.get_config_variable('foo') + 'myvalue' + >>> s.set_config_variable('foo', 'othervalue') + >>> s.get_config_variable('foo') + 'othervalue' + + :type logical_name: str + :param logical_name: The logical name of the session variable + you want to set. These are the keys in ``SESSION_VARIABLES``. + :param value: The value to associate with the config variable. + + """ + logger.debug( + "Setting config variable for %s to %r", + logical_name, + value, + ) + self._session_instance_vars[logical_name] = value + + def instance_variables(self): + return copy.copy(self._session_instance_vars) + + def get_scoped_config(self): + """ + Returns the config values from the config file scoped to the current + profile. + + The configuration data is loaded **only** from the config file. + It does not resolve variables based on different locations + (e.g. first from the session instance, then from environment + variables, then from the config file). If you want this lookup + behavior, use the ``get_config_variable`` method instead. + + Note that this configuration is specific to a single profile (the + ``profile`` session variable). + + If the ``profile`` session variable is set and the profile does + not exist in the config file, a ``ProfileNotFound`` exception + will be raised. + + :raises: ConfigNotFound, ConfigParseError, ProfileNotFound + :rtype: dict + + """ + profile_name = self.get_config_variable('profile') + profile_map = self._build_profile_map() + # If a profile is not explicitly set return the default + # profile config or an empty config dict if we don't have + # a default profile. + if profile_name is None: + return profile_map.get('default', {}) + elif profile_name not in profile_map: + # Otherwise if they specified a profile, it has to + # exist (even if it's the default profile) otherwise + # we complain. + raise ProfileNotFound(profile=profile_name) + else: + return profile_map[profile_name] + + @property + def full_config(self): + """Return the parsed config file. + + The ``get_config`` method returns the config associated with the + specified profile. This property returns the contents of the + **entire** config file. 
+
+        :rtype: dict
+        """
+        if self._config is None:
+            try:
+                config_file = self.get_config_variable('config_file')
+                self._config = botocore.configloader.load_config(config_file)
+            except ConfigNotFound:
+                self._config = {'profiles': {}}
+            try:
+                # Now we need to inject the profiles from the
+                # credentials file. We don't actually need the values
+                # in the creds file, only the profile names so that we
+                # can validate the user is not referring to a nonexistent
+                # profile.
+                cred_file = self.get_config_variable('credentials_file')
+                cred_profiles = botocore.configloader.raw_config_parse(
+                    cred_file
+                )
+                for profile in cred_profiles:
+                    cred_vars = cred_profiles[profile]
+                    if profile not in self._config['profiles']:
+                        self._config['profiles'][profile] = cred_vars
+                    else:
+                        self._config['profiles'][profile].update(cred_vars)
+            except ConfigNotFound:
+                pass
+        return self._config
+
+    def get_default_client_config(self):
+        """Retrieves the default config for creating clients
+
+        :rtype: botocore.client.Config
+        :returns: The default client config object when creating clients. If
+            the value is ``None`` then there is no default config object
+            attached to the session.
+        """
+        return self._client_config
+
+    def set_default_client_config(self, client_config):
+        """Sets the default config for creating clients
+
+        :type client_config: botocore.client.Config
+        :param client_config: The default client config object when creating
+            clients. If the value is ``None`` then there is no default config
+            object attached to the session.
+        """
+        self._client_config = client_config
+
+    def set_credentials(
+        self, access_key, secret_key, token=None, account_id=None
+    ):
+        """
+        Manually create credentials for this session. If you would
+        prefer to use botocore without a config file, environment variables,
+        or IAM roles, you can pass explicit credentials into this
+        method to establish credentials for this session.
+
+        :type access_key: str
+        :param access_key: The access key part of the credentials.
+
+        :type secret_key: str
+        :param secret_key: The secret key part of the credentials.
+
+        :type token: str
+        :param token: An optional session token used by STS session
+            credentials.
+
+        :type account_id: str
+        :param account_id: An optional account ID part of the credentials.
+        """
+        self._credentials = botocore.credentials.Credentials(
+            access_key, secret_key, token, account_id=account_id
+        )
+
+    def get_credentials(self):
+        """
+        Return the :class:`botocore.credentials.Credentials` object
+        associated with this session. If the credentials have not
+        yet been loaded, this will attempt to load them. If they
+        have already been loaded, this will return the cached
+        credentials.
+
+        """
+        if self._credentials is None:
+            self._credentials = self._components.get_component(
+                'credential_provider'
+            ).load_credentials()
+        return self._credentials
+
+    def get_auth_token(self, **kwargs):
+        """
+        Return the :class:`botocore.tokens.AuthToken` object associated with
+        this session. If the authorization token has not yet been loaded, this
+        will attempt to load it. If it has already been loaded, this will
+        return the cached authorization token.
+
+ """
+ provider = self._components.get_component('token_provider')
+
+ signing_name = kwargs.get('signing_name')
+ if signing_name is not None:
+ auth_token = provider.load_token(signing_name=signing_name)
+ if auth_token is not None:
+ return auth_token
+
+ if self._auth_token is None:
+ self._auth_token = provider.load_token()
+ return self._auth_token
+
+ def user_agent(self):
+ """
+ Return a string suitable for use as a User-Agent header.
+ The string will be of the form:
+
+ <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> <exec_env>
+
+ Where:
+
+ - agent_name is the value of the `user_agent_name` attribute
+ of the session object (`Botocore` by default).
+ - agent_version is the value of the `user_agent_version`
+ attribute of the session object (the botocore version by default).
+ - py_ver is the version of the Python interpreter being used.
+ - plat_name is the name of the platform (e.g. Darwin)
+ - plat_ver is the version of the platform
+ - exec_env is exec-env/$AWS_EXECUTION_ENV
+
+ If ``user_agent_extra`` is not empty, then this value will be
+ appended to the end of the user agent string.
+
+ """
+ base = (
+ f'{self.user_agent_name}/{self.user_agent_version} '
+ f'Python/{platform.python_version()} '
+ f'{platform.system()}/{platform.release()}'
+ )
+ if HAS_CRT:
+ base += f' awscrt/{self._get_crt_version()}'
+ if os.environ.get('AWS_EXECUTION_ENV') is not None:
+ base += ' exec-env/{}'.format(os.environ.get('AWS_EXECUTION_ENV'))
+ if self.user_agent_extra:
+ base += f' {self.user_agent_extra}'
+
+ return base
+
+ def get_data(self, data_path):
+ """
+ Retrieve the data associated with `data_path`.
+
+ :type data_path: str
+ :param data_path: The path to the data you wish to retrieve.
+ """
+ return self.get_component('data_loader').load_data(data_path)
+
+ def get_service_model(self, service_name, api_version=None):
+ """Get the service model object.
+
+ :type service_name: string
+ :param service_name: The service name
+
+ :type api_version: string
+ :param api_version: The API version of the service. If none is
+ provided, then the latest API version will be used.
+
+ :rtype: L{botocore.model.ServiceModel}
+ :return: The botocore service model for the service.
+
+ """
+ service_description = self.get_service_data(service_name, api_version)
+ return ServiceModel(service_description, service_name=service_name)
+
+ def get_waiter_model(self, service_name, api_version=None):
+ loader = self.get_component('data_loader')
+ waiter_config = loader.load_service_model(
+ service_name, 'waiters-2', api_version
+ )
+ return waiter.WaiterModel(waiter_config)
+
+ def get_paginator_model(self, service_name, api_version=None):
+ loader = self.get_component('data_loader')
+ paginator_config = loader.load_service_model(
+ service_name, 'paginators-1', api_version
+ )
+ return paginate.PaginatorModel(paginator_config)
+
+ def get_service_data(self, service_name, api_version=None):
+ """
+ Retrieve the fully merged data associated with a service.
+ """
+ data_path = service_name
+ service_data = self.get_component('data_loader').load_service_model(
+ data_path, type_name='service-2', api_version=api_version
+ )
+ service_id = EVENT_ALIASES.get(service_name, service_name)
+ self._events.emit(
+ f'service-data-loaded.{service_id}',
+ service_data=service_data,
+ service_name=service_name,
+ session=self,
+ )
+ return service_data
+
+ def get_available_services(self):
+ """
+ Return a list of names of available services.
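The model getters above are useful for introspection without building a client; ``get_service_model`` only needs the bundled service data. A sketch::

    import botocore.session

    session = botocore.session.get_session()
    model = session.get_service_model('s3')
    print(model.service_name, len(model.operation_names))
    print(session.user_agent())  # e.g. 'Botocore/<version> Python/3.11.x Linux/...'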
+ """
+ return self.get_component('data_loader').list_available_services(
+ type_name='service-2'
+ )
+
+ def set_debug_logger(self, logger_name='botocore'):
+ """
+ Convenience function to quickly configure full debug output
+ to go to the console.
+ """
+ self.set_stream_logger(logger_name, logging.DEBUG)
+
+ def set_stream_logger(
+ self, logger_name, log_level, stream=None, format_string=None
+ ):
+ """
+ Convenience method to configure a stream logger.
+
+ :type logger_name: str
+ :param logger_name: The name of the logger to configure
+
+ :type log_level: str
+ :param log_level: The log level to set for the logger. This
+ is any param supported by the ``.setLevel()`` method of
+ a ``Logger`` object.
+
+ :type stream: file
+ :param stream: A file like object to log to. If none is provided
+ then sys.stderr will be used.
+
+ :type format_string: str
+ :param format_string: The format string to use for the log
+ formatter. If none is provided this will default to
+ ``self.LOG_FORMAT``.
+
+ """
+ log = logging.getLogger(logger_name)
+ log.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler(stream)
+ ch.setLevel(log_level)
+
+ # create formatter
+ if format_string is None:
+ format_string = self.LOG_FORMAT
+ formatter = logging.Formatter(format_string)
+
+ # add formatter to ch
+ ch.setFormatter(formatter)
+
+ # add ch to logger
+ log.addHandler(ch)
+
+ def set_file_logger(self, log_level, path, logger_name='botocore'):
+ """
+ Convenience function to quickly configure any level of logging
+ to a file.
+
+ :type log_level: int
+ :param log_level: A log level as specified in the `logging` module
+
+ :type path: string
+ :param path: Path to the log file. The file will be created
+ if it doesn't already exist.
+ """
+ log = logging.getLogger(logger_name)
+ log.setLevel(logging.DEBUG)
+
+ # create file handler and set its level
+ ch = logging.FileHandler(path)
+ ch.setLevel(log_level)
+
+ # create formatter
+ formatter = logging.Formatter(self.LOG_FORMAT)
+
+ # add formatter to ch
+ ch.setFormatter(formatter)
+
+ # add ch to logger
+ log.addHandler(ch)
+
+ def register(
+ self, event_name, handler, unique_id=None, unique_id_uses_count=False
+ ):
+ """Register a handler with an event.
+
+ :type event_name: str
+ :param event_name: The name of the event.
+
+ :type handler: callable
+ :param handler: The callback to invoke when the event
+ is emitted. This object must be callable, and must
+ accept ``**kwargs``. If either of these preconditions is
+ not met, a ``ValueError`` will be raised.
+
+ :type unique_id: str
+ :param unique_id: An optional identifier to associate with the
+ registration. A unique_id can only be used once for
+ the entire session registration (unless it is unregistered).
+ This can be used to prevent an event handler from being
+ registered twice.
+
+ :type unique_id_uses_count: boolean
+ :param unique_id_uses_count: Specifies if the event should maintain
+ a count when a ``unique_id`` is registered and unregistered. The
+ event can only be completely unregistered once every register call
+ using the unique id has been matched by an ``unregister`` call.
+ If ``unique_id`` is specified, subsequent ``register``
+ calls must use the same value for ``unique_id_uses_count``
+ as the ``register`` call that first registered the event.
+
+ :raises ValueError: If the call to ``register`` uses ``unique_id``
+ but the value for ``unique_id_uses_count`` differs from the
+ ``unique_id_uses_count`` value declared by the very first
+ ``register`` call for that ``unique_id``.
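The logging helpers above are thin conveniences over the standard `logging` module. A typical debugging setup::

    import logging
    import botocore.session

    session = botocore.session.get_session()
    # Wire a StreamHandler onto the 'botocore' logger; equivalent to
    # set_debug_logger() when the level is DEBUG.
    session.set_stream_logger('botocore', logging.DEBUG)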
+ """
+ self._events.register(
+ event_name,
+ handler,
+ unique_id,
+ unique_id_uses_count=unique_id_uses_count,
+ )
+
+ def unregister(
+ self,
+ event_name,
+ handler=None,
+ unique_id=None,
+ unique_id_uses_count=False,
+ ):
+ """Unregister a handler with an event.
+
+ :type event_name: str
+ :param event_name: The name of the event.
+
+ :type handler: callable
+ :param handler: The callback to unregister.
+
+ :type unique_id: str
+ :param unique_id: A unique identifier identifying the callback
+ to unregister. You can provide either the handler or the
+ unique_id, you do not have to provide both.
+
+ :type unique_id_uses_count: boolean
+ :param unique_id_uses_count: Specifies if the event should maintain
+ a count when a ``unique_id`` is registered and unregistered. The
+ event can only be completely unregistered once every ``register``
+ call using the ``unique_id`` has been matched by an ``unregister``
+ call. If the ``unique_id`` is specified, subsequent
+ ``unregister`` calls must use the same value for
+ ``unique_id_uses_count`` as the ``register`` call that first
+ registered the event.
+
+ :raises ValueError: If the call to ``unregister`` uses ``unique_id``
+ but the value for ``unique_id_uses_count`` differs from the
+ ``unique_id_uses_count`` value declared by the very first
+ ``register`` call for that ``unique_id``.
+ """
+ self._events.unregister(
+ event_name,
+ handler=handler,
+ unique_id=unique_id,
+ unique_id_uses_count=unique_id_uses_count,
+ )
+
+ def emit(self, event_name, **kwargs):
+ return self._events.emit(event_name, **kwargs)
+
+ def emit_first_non_none_response(self, event_name, **kwargs):
+ responses = self._events.emit(event_name, **kwargs)
+ return first_non_none_response(responses)
+
+ def get_component(self, name):
+ try:
+ return self._components.get_component(name)
+ except ValueError:
+ if name in ['endpoint_resolver', 'exceptions_factory']:
+ warnings.warn(
+ f'Fetching the {name} component with the get_component() '
+ 'method is deprecated as the component has always been '
+ 'considered an internal interface of botocore',
+ DeprecationWarning,
+ )
+ return self._internal_components.get_component(name)
+ raise
+
+ def _get_internal_component(self, name):
+ # While this method may be called by botocore classes outside of the
+ # Session, this method should **never** be used by a class that lives
+ # outside of botocore.
+ return self._internal_components.get_component(name)
+
+ def _register_internal_component(self, name, component):
+ # While this method may be called by botocore classes outside of the
+ # Session, this method should **never** be used by a class that lives
+ # outside of botocore.
+ return self._internal_components.register_component(name, component)
+
+ def register_component(self, name, component):
+ self._components.register_component(name, component)
+
+ def lazy_register_component(self, name, component):
+ self._components.lazy_register_component(name, component)
+
+ @with_current_context()
+ def create_client(
+ self,
+ service_name,
+ region_name=None,
+ api_version=None,
+ use_ssl=True,
+ verify=None,
+ endpoint_url=None,
+ aws_access_key_id=None,
+ aws_secret_access_key=None,
+ aws_session_token=None,
+ config=None,
+ aws_account_id=None,
+ ):
+ """Create a botocore client.
+
+ :type service_name: string
+ :param service_name: The name of the service for which a client will
+ be created. You can use the ``Session.get_available_services()``
+ method to get a list of all available service names.
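A sketch of the ``register``/``unregister`` pair above, using a hypothetical handler on the `before-call` event family (handlers must accept ``**kwargs``; for `before-call` the kwargs include the operation model)::

    import botocore.session

    def log_operation(**kwargs):
        # 'model' is the operation model for before-call events.
        print('calling:', kwargs['model'].name)

    session = botocore.session.get_session()
    session.register('before-call.s3', log_operation, unique_id='log-s3')
    # ... create an S3 client and make calls; each operation is printed ...
    session.unregister('before-call.s3', unique_id='log-s3')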
+
+ :type region_name: string
+ :param region_name: The name of the region associated with the client.
+ A client is associated with a single region.
+
+ :type api_version: string
+ :param api_version: The API version to use. By default, botocore will
+ use the latest API version when creating a client. You only need
+ to specify this parameter if you want to use a previous API version
+ of the client.
+
+ :type use_ssl: boolean
+ :param use_ssl: Whether or not to use SSL. By default, SSL is used.
+ Note that not all services support non-ssl connections.
+
+ :type verify: boolean/string
+ :param verify: Whether or not to verify SSL certificates.
+ By default SSL certificates are verified. You can provide the
+ following values:
+
+ * False - do not validate SSL certificates. SSL will still be
+ used (unless use_ssl is False), but SSL certificates
+ will not be verified.
+ * path/to/cert/bundle.pem - A filename of the CA cert bundle to
+ use. You can specify this argument if you want to use a
+ different CA cert bundle than the one used by botocore.
+
+ :type endpoint_url: string
+ :param endpoint_url: The complete URL to use for the constructed
+ client. Normally, botocore will automatically construct the
+ appropriate URL to use when communicating with a service. You can
+ specify a complete URL (including the "http/https" scheme) to
+ override this behavior. If this value is provided, then
+ ``use_ssl`` is ignored.
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: The access key to use when creating
+ the client. This is entirely optional, and if not provided,
+ the credentials configured for the session will automatically
+ be used. You only need to provide this argument if you want
+ to override the credentials used for this specific client.
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: The secret key to use when creating
+ the client. Same semantics as aws_access_key_id above.
+
+ :type aws_session_token: string
+ :param aws_session_token: The session token to use when creating
+ the client. Same semantics as aws_access_key_id above.
+
+ :type config: botocore.client.Config
+ :param config: Advanced client configuration options. If a value
+ is specified in the client config, its value will take precedence
+ over environment variables and configuration values, but not over
+ a value passed explicitly to the method. If a default config
+ object is set on the session, the config object used when creating
+ the client will be the result of calling ``merge()`` on the
+ default config with the config provided to this call.
+
+ :type aws_account_id: string
+ :param aws_account_id: The account id to use when creating
+ the client. Same semantics as aws_access_key_id above.
+
+ :rtype: botocore.client.BaseClient
+ :return: A botocore client instance
+
+ """
+ default_client_config = self.get_default_client_config()
+ # If a config is provided and a default config is set, then
+ # use the config resulting from merging the two.
+ if config is not None and default_client_config is not None:
+ config = default_client_config.merge(config)
+ # If a config was not provided then use the default
+ # client config from the session
+ elif default_client_config is not None:
+ config = default_client_config
+
+ region_name = self._resolve_region_name(region_name, config)
+
+ # Figure out the verify value based on the various
+ # configuration options.
+ if verify is None: + verify = self.get_config_variable('ca_bundle') + + if api_version is None: + api_version = self.get_config_variable('api_versions').get( + service_name, None + ) + + loader = self.get_component('data_loader') + event_emitter = self.get_component('event_emitter') + response_parser_factory = self.get_component('response_parser_factory') + if config is not None and config.signature_version is UNSIGNED: + credentials = None + elif ( + aws_access_key_id is not None and aws_secret_access_key is not None + ): + credentials = botocore.credentials.Credentials( + access_key=aws_access_key_id, + secret_key=aws_secret_access_key, + token=aws_session_token, + account_id=aws_account_id, + ) + elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key): + raise PartialCredentialsError( + provider='explicit', + cred_var=self._missing_cred_vars( + aws_access_key_id, aws_secret_access_key + ), + ) + else: + if ignored_credentials := self._get_ignored_credentials( + aws_session_token, aws_account_id + ): + logger.debug( + "Ignoring the following credential-related values which were set without " + "an access key id and secret key on the session or client: %s", + ignored_credentials, + ) + credentials = self.get_credentials() + if getattr(credentials, 'method', None) == 'explicit': + register_feature_id('CREDENTIALS_CODE') + auth_token = self.get_auth_token() + endpoint_resolver = self._get_internal_component('endpoint_resolver') + exceptions_factory = self._get_internal_component('exceptions_factory') + config_store = copy.copy(self.get_component('config_store')) + user_agent_creator = self.get_component('user_agent_creator') + # Session configuration values for the user agent string are applied + # just before each client creation because they may have been modified + # at any time between session creation and client creation. + user_agent_creator.set_session_config( + session_user_agent_name=self.user_agent_name, + session_user_agent_version=self.user_agent_version, + session_user_agent_extra=self.user_agent_extra, + ) + defaults_mode = self._resolve_defaults_mode(config, config_store) + if defaults_mode != 'legacy': + smart_defaults_factory = self._get_internal_component( + 'smart_defaults_factory' + ) + smart_defaults_factory.merge_smart_defaults( + config_store, defaults_mode, region_name + ) + + self._add_configured_endpoint_provider( + client_name=service_name, + config_store=config_store, + ) + + user_agent_creator.set_client_features(get_context().features) + + client_creator = botocore.client.ClientCreator( + loader, + endpoint_resolver, + self.user_agent(), + event_emitter, + retryhandler, + translate, + response_parser_factory, + exceptions_factory, + config_store, + user_agent_creator=user_agent_creator, + auth_token_resolver=self.get_auth_token, + ) + client = client_creator.create_client( + service_name=service_name, + region_name=region_name, + is_secure=use_ssl, + endpoint_url=endpoint_url, + verify=verify, + credentials=credentials, + scoped_config=self.get_scoped_config(), + client_config=config, + api_version=api_version, + auth_token=auth_token, + ) + monitor = self._get_internal_component('monitor') + if monitor is not None: + monitor.register(client.meta.events) + self._register_client_plugins(client) + return client + + def _resolve_region_name(self, region_name, config): + # Figure out the user-provided region based on the various + # configuration options. 
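With ``create_client`` now complete, the config-merging behavior documented above can be sketched as follows (region and retry values are arbitrary)::

    import botocore.session
    from botocore.config import Config

    session = botocore.session.get_session()
    session.set_default_client_config(Config(retries={'max_attempts': 5}))
    # The per-call config is merged over the session default, so this
    # client keeps the retry setting while overriding the region.
    client = session.create_client('s3', config=Config(region_name='eu-west-1'))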
+ if region_name is None:
+ if config and config.region_name is not None:
+ region_name = config.region_name
+ else:
+ region_name = self.get_config_variable('region')
+
+ validate_region_name(region_name)
+ # For any client that we create in retrieving credentials
+ # we want to create it using the same region as specified in
+ # creating this client. It is important to note though that the
+ # credentials client is only created once per session. So if a new
+ # client is created with a different region, its credential resolver
+ # will use the region of the first client. However, that is not an
+ # issue as of now because the credential resolver uses only STS and
+ # the credentials returned at regional endpoints are valid across
+ # all regions in the partition.
+ self._last_client_region_used = region_name
+ return region_name
+
+ def _resolve_defaults_mode(self, client_config, config_store):
+ mode = config_store.get_config_variable('defaults_mode')
+
+ if client_config and client_config.defaults_mode:
+ mode = client_config.defaults_mode
+
+ default_config_resolver = self._get_internal_component(
+ 'default_config_resolver'
+ )
+ default_modes = default_config_resolver.get_default_modes()
+ lmode = mode.lower()
+ if lmode not in default_modes:
+ raise InvalidDefaultsMode(
+ mode=mode, valid_modes=', '.join(default_modes)
+ )
+
+ return lmode
+
+ def _add_configured_endpoint_provider(self, client_name, config_store):
+ chain = ConfiguredEndpointProvider(
+ full_config=self.full_config,
+ scoped_config=self.get_scoped_config(),
+ client_name=client_name,
+ )
+ config_store.set_config_provider(
+ logical_name='endpoint_url',
+ provider=chain,
+ )
+
+ def _missing_cred_vars(self, access_key, secret_key):
+ if access_key is not None and secret_key is None:
+ return 'aws_secret_access_key'
+ if secret_key is not None and access_key is None:
+ return 'aws_access_key_id'
+ return None
+
+ def get_available_partitions(self):
+ """Lists the available partitions found on disk
+
+ :rtype: list
+ :return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
+ """
+ resolver = self._get_internal_component('endpoint_resolver')
+ return resolver.get_available_partitions()
+
+ def get_partition_for_region(self, region_name):
+ """Returns the partition name of a particular region.
+
+ :type region_name: string
+ :param region_name: Name of the region to list partition for (e.g.,
+ us-east-1).
+
+ :rtype: string
+ :return: Returns the respective partition name (e.g., aws).
+ """
+ resolver = self._get_internal_component('endpoint_resolver')
+ return resolver.get_partition_for_region(region_name)
+
+ def get_available_regions(
+ self, service_name, partition_name='aws', allow_non_regional=False
+ ):
+ """Lists the region and endpoint names of a particular partition.
+
+ :type service_name: string
+ :param service_name: Name of a service to list endpoints for (e.g., s3).
+ This parameter accepts a service name (e.g., "elb") or endpoint
+ prefix (e.g., "elasticloadbalancing").
+
+ :type partition_name: string
+ :param partition_name: Name of the partition to limit endpoints to
+ (e.g., aws for the public AWS endpoints, aws-cn for AWS China
+ endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
+
+ :type allow_non_regional: bool
+ :param allow_non_regional: Set to True to include endpoints that are
+ not regional endpoints (e.g., s3-external-1,
+ fips-us-gov-west-1, etc.).
+ :return: Returns a list of endpoint names (e.g., ["us-east-1"]).
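A quick sketch of the partition helpers above, which resolve against the bundled endpoints data::

    import botocore.session

    session = botocore.session.get_session()
    print(session.get_available_partitions())             # e.g. ['aws', 'aws-cn', ...]
    print(session.get_partition_for_region('us-east-1'))  # 'aws'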
+ """
+ resolver = self._get_internal_component('endpoint_resolver')
+ results = []
+ try:
+ service_data = self.get_service_data(service_name)
+ endpoint_prefix = service_data['metadata'].get(
+ 'endpointPrefix', service_name
+ )
+ results = resolver.get_available_endpoints(
+ endpoint_prefix, partition_name, allow_non_regional
+ )
+ except UnknownServiceError:
+ pass
+ return results
+
+ def _get_ignored_credentials(self, aws_session_token, aws_account_id):
+ credential_inputs = []
+ if aws_session_token:
+ credential_inputs.append('aws_session_token')
+ if aws_account_id:
+ credential_inputs.append('aws_account_id')
+ return ', '.join(credential_inputs) if credential_inputs else None
+
+ def _register_client_plugins(self, client):
+ plugins_list = get_botocore_plugins()
+ if plugins_list == "DISABLED" or not plugins_list:
+ return
+
+ client_plugins = {}
+ for plugin in plugins_list.split(','):
+ try:
+ name, module = [part.strip() for part in plugin.split('=')]
+ client_plugins[name] = module
+ except ValueError:
+ logger.warning(
+ "Invalid plugin format: %s. Expected 'name=module'", plugin
+ )
+
+ if client_plugins:
+ load_client_plugins(client, client_plugins)
+
+
+class ComponentLocator:
+ """Service locator for session components."""
+
+ def __init__(self):
+ self._components = {}
+ self._deferred = {}
+
+ def get_component(self, name):
+ if name in self._deferred:
+ factory = self._deferred[name]
+ self._components[name] = factory()
+ # Only delete the component from the deferred dict after
+ # successfully creating the object from the factory as well as
+ # injecting the instantiated value into the _components dict.
+ try:
+ del self._deferred[name]
+ except KeyError:
+ # If we get here, it's likely that get_component was called
+ # concurrently from multiple threads, and another thread
+ # already deleted the entry. This means the factory was
+ # probably called twice, but cleaning up the deferred entry
+ # should not crash outright.
+ pass
+ try:
+ return self._components[name]
+ except KeyError:
+ raise ValueError(f"Unknown component: {name}")
+
+ def register_component(self, name, component):
+ self._components[name] = component
+ try:
+ del self._deferred[name]
+ except KeyError:
+ pass
+
+ def lazy_register_component(self, name, no_arg_factory):
+ self._deferred[name] = no_arg_factory
+ try:
+ del self._components[name]
+ except KeyError:
+ pass
+
+
+class SessionVarDict(MutableMapping):
+ def __init__(self, session, session_vars):
+ self._session = session
+ self._store = copy.copy(session_vars)
+
+ def __getitem__(self, key):
+ return self._store[key]
+
+ def __setitem__(self, key, value):
+ self._store[key] = value
+ self._update_config_store_from_session_vars(key, value)
+
+ def __delitem__(self, key):
+ del self._store[key]
+
+ def __iter__(self):
+ return iter(self._store)
+
+ def __len__(self):
+ return len(self._store)
+
+ def _update_config_store_from_session_vars(
+ self, logical_name, config_options
+ ):
+ # This is for backwards compatibility. The new preferred way to
+ # modify configuration logic is to use the component system to get
+ # the config_store component from the session, and then update
+ # a key with custom config provider(s).
+ # This backwards compatibility method takes the old session_vars
+ # list of tuples and transforms that into a set of updates to
+ # the config_store component.
+ config_chain_builder = ConfigChainFactory(session=self._session) + config_name, env_vars, default, typecast = config_options + config_store = self._session.get_component('config_store') + config_store.set_config_provider( + logical_name, + config_chain_builder.create_config_chain( + instance_name=logical_name, + env_var_names=env_vars, + config_property_names=config_name, + default=default, + conversion_func=typecast, + ), + ) + + +class SubsetChainConfigFactory: + """A class for creating backwards compatible configuration chains. + + This class can be used instead of + :class:`botocore.configprovider.ConfigChainFactory` to make it honor the + methods argument to get_config_variable. This class can be used to filter + out providers that are not in the methods tuple when creating a new config + chain. + """ + + def __init__(self, session, methods, environ=None): + self._factory = ConfigChainFactory(session, environ) + self._supported_methods = methods + + def create_config_chain( + self, + instance_name=None, + env_var_names=None, + config_property_name=None, + default=None, + conversion_func=None, + ): + """Build a config chain following the standard botocore pattern. + + This config chain factory will omit any providers not in the methods + tuple provided at initialization. For example if given the tuple + ('instance', 'config',) it will not inject the environment provider + into the standard config chain. This lets the botocore session support + the custom ``methods`` argument for all the default botocore config + variables when calling ``get_config_variable``. + """ + if 'instance' not in self._supported_methods: + instance_name = None + if 'env' not in self._supported_methods: + env_var_names = None + if 'config' not in self._supported_methods: + config_property_name = None + return self._factory.create_config_chain( + instance_name=instance_name, + env_var_names=env_var_names, + config_property_names=config_property_name, + default=default, + conversion_func=conversion_func, + ) + + +def get_session(env_vars=None): + """ + Return a new session object. + """ + return Session(env_vars) diff --git a/py311/lib/python3.11/site-packages/botocore/signers.py b/py311/lib/python3.11/site-packages/botocore/signers.py new file mode 100644 index 0000000000000000000000000000000000000000..1002d475246e15bbd022a0f9f6d082ea0fed71fc --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/signers.py @@ -0,0 +1,996 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
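``get_session`` at the end of session.py above is the module's conventional entry point; everything else hangs off the ``Session`` it returns::

    import botocore.session

    session = botocore.session.get_session()
    client = session.create_client('ec2', region_name='us-east-1')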
+import base64 +import datetime +import json +import weakref + +import botocore +import botocore.auth +from botocore.awsrequest import create_request_object, prepare_request_dict +from botocore.compat import OrderedDict, get_current_datetime +from botocore.exceptions import ( + ParamValidationError, + UnknownClientMethodError, + UnknownSignatureVersionError, + UnsupportedSignatureVersionError, +) +from botocore.tokens import FrozenAuthToken +from botocore.utils import ( + ArnParser, + datetime2timestamp, + fix_s3_host, # noqa: F401 +) + + +class RequestSigner: + """ + An object to sign requests before they go out over the wire using + one of the authentication mechanisms defined in ``auth.py``. This + class fires two events scoped to a service and operation name: + + * choose-signer: Allows overriding the auth signer name. + * before-sign: Allows mutating the request before signing. + + Together these events allow for customization of the request + signing pipeline, including overrides, request path manipulation, + and disabling signing per operation. + + + :type service_id: botocore.model.ServiceId + :param service_id: The service id for the service, e.g. ``S3`` + + :type region_name: string + :param region_name: Name of the service region, e.g. ``us-east-1`` + + :type signing_name: string + :param signing_name: Service signing name. This is usually the + same as the service name, but can differ. E.g. + ``emr`` vs. ``elasticmapreduce``. + + :type signature_version: string + :param signature_version: Signature name like ``v4``. + + :type credentials: :py:class:`~botocore.credentials.Credentials` + :param credentials: User credentials with which to sign requests. + + :type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks` + :param event_emitter: Extension mechanism to fire events. + """ + + def __init__( + self, + service_id, + region_name, + signing_name, + signature_version, + credentials, + event_emitter, + auth_token=None, + ): + self._region_name = region_name + self._signing_name = signing_name + self._signature_version = signature_version + self._credentials = credentials + self._auth_token = auth_token + self._service_id = service_id + + # We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6 + self._event_emitter = weakref.proxy(event_emitter) + + @property + def region_name(self): + return self._region_name + + @property + def signature_version(self): + return self._signature_version + + @property + def signing_name(self): + return self._signing_name + + def handler(self, operation_name=None, request=None, **kwargs): + # This is typically hooked up to the "request-created" event + # from a client's event emitter. When a new request is created + # this method is invoked to sign the request. + # Don't call this method directly. + return self.sign(operation_name, request) + + def sign( + self, + operation_name, + request, + region_name=None, + signing_type='standard', + expires_in=None, + signing_name=None, + ): + """Sign a request before it goes out over the wire. + + :type operation_name: string + :param operation_name: The name of the current operation, e.g. + ``ListBuckets``. + :type request: AWSRequest + :param request: The request object to be sent over the wire. + + :type region_name: str + :param region_name: The region to sign the request for. + + :type signing_type: str + :param signing_type: The type of signing to perform. This can be one of + three possible values: + + * 'standard' - This should be used for most requests. 
+ * 'presign-url' - This should be used when pre-signing a request. + * 'presign-post' - This should be used when pre-signing an S3 post. + + :type expires_in: int + :param expires_in: The number of seconds the presigned url is valid + for. This parameter is only valid for signing type 'presign-url'. + + :type signing_name: str + :param signing_name: The name to use for the service when signing. + """ + explicit_region_name = region_name + if region_name is None: + region_name = self._region_name + + if signing_name is None: + signing_name = self._signing_name + + signature_version = self._choose_signer( + operation_name, signing_type, request.context + ) + + # Allow mutating request before signing + self._event_emitter.emit( + f'before-sign.{self._service_id.hyphenize()}.{operation_name}', + request=request, + signing_name=signing_name, + region_name=self._region_name, + signature_version=signature_version, + request_signer=self, + operation_name=operation_name, + ) + + if signature_version != botocore.UNSIGNED: + kwargs = { + 'signing_name': signing_name, + 'region_name': region_name, + 'signature_version': signature_version, + } + if expires_in is not None: + kwargs['expires'] = expires_in + signing_context = request.context.get('signing', {}) + if not explicit_region_name and signing_context.get('region'): + kwargs['region_name'] = signing_context['region'] + if signing_context.get('signing_name'): + kwargs['signing_name'] = signing_context['signing_name'] + if signing_context.get('request_credentials'): + kwargs['request_credentials'] = signing_context[ + 'request_credentials' + ] + if signing_context.get('identity_cache') is not None: + self._resolve_identity_cache( + kwargs, + signing_context['identity_cache'], + signing_context['cache_key'], + ) + try: + auth = self.get_auth_instance(**kwargs) + except UnknownSignatureVersionError as e: + if signing_type != 'standard': + raise UnsupportedSignatureVersionError( + signature_version=signature_version + ) + else: + raise e + + auth.add_auth(request) + + def _resolve_identity_cache(self, kwargs, cache, cache_key): + kwargs['identity_cache'] = cache + kwargs['cache_key'] = cache_key + + def _choose_signer(self, operation_name, signing_type, context): + """ + Allow setting the signature version via the choose-signer event. + A value of `botocore.UNSIGNED` means no signing will be performed. + + :param operation_name: The operation to sign. + :param signing_type: The type of signing that the signer is to be used + for. + :return: The signature version to sign with. 
+ """
+ signing_type_suffix_map = {
+ 'presign-post': '-presign-post',
+ 'presign-url': '-query',
+ }
+ suffix = signing_type_suffix_map.get(signing_type, '')
+
+ # operation specific signing context takes precedence over client-level
+ # defaults
+ signature_version = context.get('auth_type') or self._signature_version
+ signing = context.get('signing', {})
+ signing_name = signing.get('signing_name', self._signing_name)
+ region_name = signing.get('region', self._region_name)
+ if (
+ signature_version is not botocore.UNSIGNED
+ and not signature_version.endswith(suffix)
+ ):
+ signature_version += suffix
+
+ handler, response = self._event_emitter.emit_until_response(
+ f'choose-signer.{self._service_id.hyphenize()}.{operation_name}',
+ signing_name=signing_name,
+ region_name=region_name,
+ signature_version=signature_version,
+ context=context,
+ )
+
+ if response is not None:
+ signature_version = response
+ # The suffix needs to be checked again in case we get an improper
+ # signature version from choose-signer.
+ if (
+ signature_version is not botocore.UNSIGNED
+ and not signature_version.endswith(suffix)
+ ):
+ signature_version += suffix
+
+ return signature_version
+
+ def get_auth_instance(
+ self,
+ signing_name,
+ region_name,
+ signature_version=None,
+ request_credentials=None,
+ **kwargs,
+ ):
+ """
+ Get an auth instance which can be used to sign a request
+ using the given signature version.
+
+ :type signing_name: string
+ :param signing_name: Service signing name. This is usually the
+ same as the service name, but can differ. E.g.
+ ``emr`` vs. ``elasticmapreduce``.
+
+ :type region_name: string
+ :param region_name: Name of the service region, e.g. ``us-east-1``
+
+ :type signature_version: string
+ :param signature_version: Signature name like ``v4``.
+
+ :rtype: :py:class:`~botocore.auth.BaseSigner`
+ :return: Auth instance to sign a request.
+ """
+ if signature_version is None:
+ signature_version = self._signature_version
+
+ cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
+ if cls is None:
+ raise UnknownSignatureVersionError(
+ signature_version=signature_version
+ )
+
+ if cls.REQUIRES_TOKEN is True:
+ if self._auth_token and not isinstance(
+ self._auth_token, FrozenAuthToken
+ ):
+ frozen_token = self._auth_token.get_frozen_token()
+ else:
+ frozen_token = self._auth_token
+ auth = cls(frozen_token)
+ return auth
+
+ credentials = request_credentials or self._credentials
+ if getattr(cls, "REQUIRES_IDENTITY_CACHE", None) is True:
+ cache = kwargs["identity_cache"]
+ key = kwargs["cache_key"]
+ credentials = cache.get_credentials(key)
+ del kwargs["cache_key"]
+
+ # If no credentials are provided (i.e. credentials is None),
+ # then we'll pass a value of "None" over to the auth classes,
+ # which already handle the cases where no credentials have
+ # been provided.
+ frozen_credentials = None
+ if credentials is not None:
+ frozen_credentials = credentials.get_frozen_credentials()
+ kwargs['credentials'] = frozen_credentials
+ if cls.REQUIRES_REGION:
+ if self._region_name is None:
+ raise botocore.exceptions.NoRegionError()
+ kwargs['region_name'] = region_name
+ kwargs['service_name'] = signing_name
+ auth = cls(**kwargs)
+ return auth
+
+ # Alias get_auth for backwards compatibility.
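As the `_choose_signer` docstring above notes, a `choose-signer` handler can replace the signature version, and returning ``botocore.UNSIGNED`` disables signing. A sketch of anonymous S3 access built on that event (the handler is hypothetical)::

    import botocore
    import botocore.session

    def disable_signing(**kwargs):
        # Returning botocore.UNSIGNED from choose-signer skips signing.
        return botocore.UNSIGNED

    session = botocore.session.get_session()
    session.register('choose-signer.s3.*', disable_signing)
    client = session.create_client('s3', region_name='us-east-1')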
+ get_auth = get_auth_instance + + def generate_presigned_url( + self, + request_dict, + operation_name, + expires_in=3600, + region_name=None, + signing_name=None, + ): + """Generates a presigned url + + :type request_dict: dict + :param request_dict: The prepared request dictionary returned by + ``botocore.awsrequest.prepare_request_dict()`` + + :type operation_name: str + :param operation_name: The operation being signed. + + :type expires_in: int + :param expires_in: The number of seconds the presigned url is valid + for. By default it expires in an hour (3600 seconds) + + :type region_name: string + :param region_name: The region name to sign the presigned url. + + :type signing_name: str + :param signing_name: The name to use for the service when signing. + + :returns: The presigned url + """ + request = create_request_object(request_dict) + self.sign( + operation_name, + request, + region_name, + 'presign-url', + expires_in, + signing_name, + ) + + request.prepare() + return request.url + + +class CloudFrontSigner: + '''A signer to create a signed CloudFront URL. + + First you create a cloudfront signer based on a normalized RSA signer:: + + import rsa + def rsa_signer(message): + private_key = open('private_key.pem', 'r').read() + return rsa.sign( + message, + rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')), + 'SHA-1') # CloudFront requires SHA-1 hash + cf_signer = CloudFrontSigner(key_id, rsa_signer) + + To sign with a canned policy:: + + signed_url = cf_signer.generate_signed_url( + url, date_less_than=datetime(2015, 12, 1)) + + To sign with a custom policy:: + + signed_url = cf_signer.generate_signed_url(url, policy=my_policy) + ''' + + def __init__(self, key_id, rsa_signer): + """Create a CloudFrontSigner. + + :type key_id: str + :param key_id: The CloudFront Key Pair ID + + :type rsa_signer: callable + :param rsa_signer: An RSA signer. + Its only input parameter will be the message to be signed, + and its output will be the signed content as a binary string. + The hash algorithm needed by CloudFront is SHA-1. + """ + self.key_id = key_id + self.rsa_signer = rsa_signer + + def generate_presigned_url(self, url, date_less_than=None, policy=None): + """Creates a signed CloudFront URL based on given parameters. + + :type url: str + :param url: The URL of the protected object + + :type date_less_than: datetime + :param date_less_than: The URL will expire after that date and time + + :type policy: str + :param policy: The custom policy, possibly built by self.build_policy() + + :rtype: str + :return: The signed URL. + """ + both_args_supplied = date_less_than is not None and policy is not None + neither_arg_supplied = date_less_than is None and policy is None + if both_args_supplied or neither_arg_supplied: + e = 'Need to provide either date_less_than or policy, but not both' + raise ValueError(e) + if date_less_than is not None: + # We still need to build a canned policy for signing purpose + policy = self.build_policy(url, date_less_than) + if isinstance(policy, str): + policy = policy.encode('utf8') + if date_less_than is not None: + params = [f'Expires={int(datetime2timestamp(date_less_than))}'] + else: + params = [f"Policy={self._url_b64encode(policy).decode('utf8')}"] + signature = self.rsa_signer(policy) + params.extend( + [ + f"Signature={self._url_b64encode(signature).decode('utf8')}", + f"Key-Pair-Id={self.key_id}", + ] + ) + return self._build_url(url, params) + + def _build_url(self, base_url, extra_params): + separator = '&' if '?' in base_url else '?' 
+ return base_url + separator + '&'.join(extra_params)
+
+ def build_policy(
+ self, resource, date_less_than, date_greater_than=None, ip_address=None
+ ):
+ """A helper to build a policy.
+
+ :type resource: str
+ :param resource: The URL or the stream filename of the protected object
+
+ :type date_less_than: datetime
+ :param date_less_than: The URL will expire after the time has passed
+
+ :type date_greater_than: datetime
+ :param date_greater_than: The URL will not be valid until this time
+
+ :type ip_address: str
+ :param ip_address: Use 'x.x.x.x' for an IP, or 'x.x.x.x/x' for a subnet
+
+ :rtype: str
+ :return: The policy in a compact string.
+ """
+ # Note:
+ # 1. Order in canned policy is significant. Special care has been taken
+ # to ensure the output will match the order defined by the document.
+ # There is also a test case to ensure that order.
+ # SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-policy-statement
+ # 2. Although the order in custom policy is not required by CloudFront,
+ # we still use OrderedDict internally to ensure the result is stable
+ # and also matches the canned policy requirement.
+ # SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html
+ moment = int(datetime2timestamp(date_less_than))
+ condition = OrderedDict({"DateLessThan": {"AWS:EpochTime": moment}})
+ if ip_address:
+ if '/' not in ip_address:
+ ip_address += '/32'
+ condition["IpAddress"] = {"AWS:SourceIp": ip_address}
+ if date_greater_than:
+ moment = int(datetime2timestamp(date_greater_than))
+ condition["DateGreaterThan"] = {"AWS:EpochTime": moment}
+ ordered_payload = [('Resource', resource), ('Condition', condition)]
+ custom_policy = {"Statement": [OrderedDict(ordered_payload)]}
+ return json.dumps(custom_policy, separators=(',', ':'))
+
+ def _url_b64encode(self, data):
+ # Required by CloudFront. See also:
+ # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html
+ return (
+ base64.b64encode(data)
+ .replace(b'+', b'-')
+ .replace(b'=', b'_')
+ .replace(b'/', b'~')
+ )
+
+
+def add_generate_db_auth_token(class_attributes, **kwargs):
+ class_attributes['generate_db_auth_token'] = generate_db_auth_token
+
+
+def add_dsql_generate_db_auth_token_methods(class_attributes, **kwargs):
+ class_attributes['generate_db_connect_auth_token'] = (
+ dsql_generate_db_connect_auth_token
+ )
+ class_attributes['generate_db_connect_admin_auth_token'] = (
+ dsql_generate_db_connect_admin_auth_token
+ )
+
+
+def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
+ """Generates an auth token used to connect to a db with IAM credentials.
+
+ :type DBHostname: str
+ :param DBHostname: The hostname of the database to connect to.
+
+ :type Port: int
+ :param Port: The port number the database is listening on.
+
+ :type DBUsername: str
+ :param DBUsername: The username to log in as.
+
+ :type Region: str
+ :param Region: The region the database is in. If None, the client
+ region will be used.
+
+ :return: A presigned url which can be used as an auth token.
+ """ + region = Region + if region is None: + region = self.meta.region_name + + params = { + 'Action': 'connect', + 'DBUser': DBUsername, + } + + request_dict = { + 'url_path': '/', + 'query_string': '', + 'headers': {}, + 'body': params, + 'method': 'GET', + } + + # RDS requires that the scheme not be set when sent over. This can cause + # issues when signing because the Python url parsing libraries follow + # RFC 1808 closely, which states that a netloc must be introduced by `//`. + # Otherwise the url is presumed to be relative, and thus the whole + # netloc would be treated as a path component. To work around this we + # introduce https here and remove it once we're done processing it. + scheme = 'https://' + endpoint_url = f'{scheme}{DBHostname}:{Port}' + prepare_request_dict(request_dict, endpoint_url) + presigned_url = self._request_signer.generate_presigned_url( + operation_name='connect', + request_dict=request_dict, + region_name=region, + expires_in=900, + signing_name='rds-db', + ) + return presigned_url[len(scheme) :] + + +def _dsql_generate_db_auth_token( + self, Hostname, Action, Region=None, ExpiresIn=900 +): + """Generate a DSQL database token for an arbitrary action. + + :type Hostname: str + :param Hostname: The DSQL endpoint host name. + + :type Action: str + :param Action: Action to perform on the cluster (DbConnectAdmin or DbConnect). + + :type Region: str + :param Region: The AWS region where the DSQL Cluster is hosted. If None, the client region will be used. + + :type ExpiresIn: int + :param ExpiresIn: The token expiry duration in seconds (default is 900 seconds). + + :return: A presigned url which can be used as an auth token. + """ + possible_actions = ("DbConnect", "DbConnectAdmin") + + if Action not in possible_actions: + raise ParamValidationError( + report=f"Received {Action} for action but expected one of: {', '.join(possible_actions)}" + ) + + if Region is None: + Region = self.meta.region_name + + request_dict = { + 'url_path': '/', + 'query_string': '', + 'headers': {}, + 'body': { + 'Action': Action, + }, + 'method': 'GET', + } + scheme = 'https://' + endpoint_url = f'{scheme}{Hostname}' + prepare_request_dict(request_dict, endpoint_url) + presigned_url = self._request_signer.generate_presigned_url( + operation_name=Action, + request_dict=request_dict, + region_name=Region, + expires_in=ExpiresIn, + signing_name='dsql', + ) + return presigned_url[len(scheme) :] + + +def dsql_generate_db_connect_auth_token( + self, Hostname, Region=None, ExpiresIn=900 +): + """Generate a DSQL database token for the "DbConnect" action. + + :type Hostname: str + :param Hostname: The DSQL endpoint host name. + + :type Region: str + :param Region: The AWS region where the DSQL Cluster is hosted. If None, the client region will be used. + + :type ExpiresIn: int + :param ExpiresIn: The token expiry duration in seconds (default is 900 seconds). + + :return: A presigned url which can be used as an auth token. + """ + return _dsql_generate_db_auth_token( + self, Hostname, "DbConnect", Region, ExpiresIn + ) + + +def dsql_generate_db_connect_admin_auth_token( + self, Hostname, Region=None, ExpiresIn=900 +): + """Generate a DSQL database token for the "DbConnectAdmin" action. + + :type Hostname: str + :param Hostname: The DSQL endpoint host name. + + :type Region: str + :param Region: The AWS region where the DSQL Cluster is hosted. If None, the client region will be used. + + :type ExpiresIn: int + :param ExpiresIn: The token expiry duration in seconds (default is 900 seconds). 
+
+ :return: A presigned url which can be used as an auth token.
+ """
+ return _dsql_generate_db_auth_token(
+ self, Hostname, "DbConnectAdmin", Region, ExpiresIn
+ )
+
+
+class S3PostPresigner:
+ def __init__(self, request_signer):
+ self._request_signer = request_signer
+
+ def generate_presigned_post(
+ self,
+ request_dict,
+ fields=None,
+ conditions=None,
+ expires_in=3600,
+ region_name=None,
+ ):
+ """Generates the url and the form fields used for a presigned s3 post
+
+ :type request_dict: dict
+ :param request_dict: The prepared request dictionary returned by
+ ``botocore.awsrequest.prepare_request_dict()``
+
+ :type fields: dict
+ :param fields: A dictionary of prefilled form fields to build on top
+ of.
+
+ :type conditions: list
+ :param conditions: A list of conditions to include in the policy. Each
+ element can be either a list or a structure. For example:
+
+ .. code:: python
+
+ [
+ {"acl": "public-read"},
+ {"bucket": "amzn-s3-demo-bucket"},
+ ["starts-with", "$key", "mykey"]
+ ]
+
+ :type expires_in: int
+ :param expires_in: The number of seconds the presigned post is valid
+ for.
+
+ :type region_name: string
+ :param region_name: The region name to sign the presigned post to.
+
+ :rtype: dict
+ :returns: A dictionary with two elements: ``url`` and ``fields``.
+ Url is the url to post to. Fields is a dictionary filled with
+ the form fields and respective values to use when submitting the
+ post. For example:
+
+ .. code:: python
+
+ {
+ 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com',
+ 'fields': {
+ 'acl': 'public-read',
+ 'key': 'mykey',
+ 'signature': 'mysignature',
+ 'policy': 'mybase64 encoded policy'
+ }
+ }
+ """
+ if fields is None:
+ fields = {}
+
+ if conditions is None:
+ conditions = []
+
+ # Create the policy for the post.
+ policy = {}
+
+ # Create an expiration date for the policy
+ datetime_now = get_current_datetime()
+ expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
+ policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
+
+ # Append all of the conditions that the user supplied.
+ policy['conditions'] = []
+ for condition in conditions:
+ policy['conditions'].append(condition)
+
+ # Store the policy and the fields in the request for signing
+ request = create_request_object(request_dict)
+ request.context['s3-presign-post-fields'] = fields
+ request.context['s3-presign-post-policy'] = policy
+
+ self._request_signer.sign(
+ 'PutObject', request, region_name, 'presign-post'
+ )
+ # Return the url and the fields for the form to post.
+ return {'url': request.url, 'fields': fields}
+
+
+def add_generate_presigned_url(class_attributes, **kwargs):
+ class_attributes['generate_presigned_url'] = generate_presigned_url
+
+
+def generate_presigned_url(
+ self, ClientMethod, Params=None, ExpiresIn=3600, HttpMethod=None
+):
+ """Generate a presigned url given a client, its method, and arguments
+
+ :type ClientMethod: string
+ :param ClientMethod: The client method to presign for
+
+ :type Params: dict
+ :param Params: The parameters normally passed to
+ ``ClientMethod``.
+
+ :type ExpiresIn: int
+ :param ExpiresIn: The number of seconds the presigned url is valid
+ for. By default it expires in an hour (3600 seconds)
+
+ :type HttpMethod: string
+ :param HttpMethod: The http method to use on the generated url. By
+ default, the http method is whatever is used in the method's model.
+ + :returns: The presigned url + """ + client_method = ClientMethod + params = Params + if params is None: + params = {} + expires_in = ExpiresIn + http_method = HttpMethod + context = { + 'is_presign_request': True, + 'use_global_endpoint': _should_use_global_endpoint(self), + } + + request_signer = self._request_signer + + try: + operation_name = self._PY_TO_OP_NAME[client_method] + except KeyError: + raise UnknownClientMethodError(method_name=client_method) + + operation_model = self.meta.service_model.operation_model(operation_name) + params = self._emit_api_params( + api_params=params, + operation_model=operation_model, + context=context, + ) + bucket_is_arn = ArnParser.is_arn(params.get('Bucket', '')) + ( + endpoint_url, + additional_headers, + properties, + ) = self._resolve_endpoint_ruleset( + operation_model, + params, + context, + ignore_signing_region=(not bucket_is_arn), + ) + + request_dict = self._convert_to_request_dict( + api_params=params, + operation_model=operation_model, + endpoint_url=endpoint_url, + context=context, + headers=additional_headers, + set_user_agent_header=False, + ) + + # Switch out the http method if user specified it. + if http_method is not None: + request_dict['method'] = http_method + + # Generate the presigned url. + return request_signer.generate_presigned_url( + request_dict=request_dict, + expires_in=expires_in, + operation_name=operation_name, + ) + + +def add_generate_presigned_post(class_attributes, **kwargs): + class_attributes['generate_presigned_post'] = generate_presigned_post + + +def generate_presigned_post( + self, Bucket, Key, Fields=None, Conditions=None, ExpiresIn=3600 +): + """Builds the url and the form fields used for a presigned s3 post + + :type Bucket: string + :param Bucket: The name of the bucket to presign the post to. Note that + bucket related conditions should not be included in the + ``conditions`` parameter. + + :type Key: string + :param Key: Key name, optionally add ${filename} to the end to + attach the submitted filename. Note that key related conditions and + fields are filled out for you and should not be included in the + ``Fields`` or ``Conditions`` parameter. + + :type Fields: dict + :param Fields: A dictionary of prefilled form fields to build on top + of. Elements that may be included are acl, Cache-Control, + Content-Type, Content-Disposition, Content-Encoding, Expires, + success_action_redirect, redirect, success_action_status, + and x-amz-meta-. + + Note that if a particular element is included in the fields + dictionary it will not be automatically added to the conditions + list. You must specify a condition for the element as well. + + :type Conditions: list + :param Conditions: A list of conditions to include in the policy. Each + element can be either a list or a structure. For example: + + .. code:: python + + [ + {"acl": "public-read"}, + ["content-length-range", 2, 5], + ["starts-with", "$success_action_redirect", ""] + ] + + Conditions that are included may pertain to acl, + content-length-range, Cache-Control, Content-Type, + Content-Disposition, Content-Encoding, Expires, + success_action_redirect, redirect, success_action_status, + and/or x-amz-meta-. + + Note that if you include a condition, you must specify + a valid value in the fields dictionary as well. A value will + not be added automatically to the fields dictionary based on the + conditions. + + :type ExpiresIn: int + :param ExpiresIn: The number of seconds the presigned post + is valid for. 
+
+ :rtype: dict
+ :returns: A dictionary with two elements: ``url`` and ``fields``.
+ Url is the url to post to. Fields is a dictionary filled with
+ the form fields and respective values to use when submitting the
+ post. For example:
+
+ .. code:: python
+
+ {
+ 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com',
+ 'fields': {
+ 'acl': 'public-read',
+ 'key': 'mykey',
+ 'signature': 'mysignature',
+ 'policy': 'mybase64 encoded policy'
+ }
+ }
+ """
+ bucket = Bucket
+ key = Key
+ fields = Fields
+ conditions = Conditions
+ expires_in = ExpiresIn
+
+ if fields is None:
+ fields = {}
+ else:
+ fields = fields.copy()
+
+ if conditions is None:
+ conditions = []
+
+ context = {
+ 'is_presign_request': True,
+ 'use_global_endpoint': _should_use_global_endpoint(self),
+ }
+
+ post_presigner = S3PostPresigner(self._request_signer)
+
+ # We choose the CreateBucket operation model because its url gets
+ # serialized to what a presign post requires.
+ operation_model = self.meta.service_model.operation_model('CreateBucket')
+ params = self._emit_api_params(
+ api_params={'Bucket': bucket},
+ operation_model=operation_model,
+ context=context,
+ )
+ bucket_is_arn = ArnParser.is_arn(params.get('Bucket', ''))
+ (
+ endpoint_url,
+ additional_headers,
+ properties,
+ ) = self._resolve_endpoint_ruleset(
+ operation_model,
+ params,
+ context,
+ ignore_signing_region=(not bucket_is_arn),
+ )
+
+ request_dict = self._convert_to_request_dict(
+ api_params=params,
+ operation_model=operation_model,
+ endpoint_url=endpoint_url,
+ context=context,
+ headers=additional_headers,
+ set_user_agent_header=False,
+ )
+
+ # Append the bucket name to the list of conditions.
+ conditions.append({'bucket': bucket})
+
+ # If the key ends with filename, the only constraint that can be
+ # imposed is if it starts with the specified prefix.
+ if key.endswith('${filename}'):
+ conditions.append(["starts-with", '$key', key[: -len('${filename}')]])
+ else:
+ conditions.append({'key': key})
+
+ # Add the key to the fields.
+ fields['key'] = key
+
+ return post_presigner.generate_presigned_post(
+ request_dict=request_dict,
+ fields=fields,
+ conditions=conditions,
+ expires_in=expires_in,
+ )
+
+
+def _should_use_global_endpoint(client):
+ if client.meta.partition != 'aws':
+ return False
+ s3_config = client.meta.config.s3
+ if s3_config:
+ if s3_config.get('use_dualstack_endpoint', False):
+ return False
+ if (
+ s3_config.get('us_east_1_regional_endpoint') == 'regional'
+ and client.meta.config.region_name == 'us-east-1'
+ ):
+ return False
+ if s3_config.get('addressing_style') == 'virtual':
+ return False
+ return True
diff --git a/py311/lib/python3.11/site-packages/botocore/stub.py b/py311/lib/python3.11/site-packages/botocore/stub.py
new file mode 100644
index 0000000000000000000000000000000000000000..d85294a7f448fedd1aeb37b8f7dcb7ec38f33f9e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/stub.py
@@ -0,0 +1,452 @@
+# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
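Before moving on to stub.py: the presigned-POST flow above produces a URL plus ready-to-submit form fields. A sketch (the bucket name is a placeholder)::

    import botocore.session

    session = botocore.session.get_session()
    s3 = session.create_client('s3', region_name='us-east-1')
    post = s3.generate_presigned_post(
        Bucket='amzn-s3-demo-bucket',
        Key='uploads/${filename}',
        Conditions=[['content-length-range', 1, 1024 * 1024]],
        ExpiresIn=600,
    )
    # post['url'] is the form action; post['fields'] are the form inputs.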
+import copy
+from collections import deque
+from pprint import pformat
+
+from botocore.awsrequest import AWSResponse
+from botocore.exceptions import (
+    ParamValidationError,
+    StubAssertionError,
+    StubResponseError,
+    UnStubbedResponseError,
+)
+from botocore.validate import validate_parameters
+
+
+class _ANY:
+    """
+    A helper object that compares equal to everything. Copied from
+    unittest.mock
+    """
+
+    def __eq__(self, other):
+        return True
+
+    def __ne__(self, other):
+        return False
+
+    def __repr__(self):
+        return '<ANY>'
+
+
+ANY = _ANY()
+
+
+class Stubber:
+    """
+    This class will allow you to stub out requests so you don't have to hit
+    an endpoint to write tests. Responses are returned first in, first out.
+    If operations are called out of order, or are called with no remaining
+    queued responses, an error will be raised.
+
+    **Example:**
+    ::
+        import datetime
+        import botocore.session
+        from botocore.stub import Stubber
+
+
+        s3 = botocore.session.get_session().create_client('s3')
+        stubber = Stubber(s3)
+
+        response = {
+            'IsTruncated': False,
+            'Name': 'test-bucket',
+            'MaxKeys': 1000, 'Prefix': '',
+            'Contents': [{
+                'Key': 'test.txt',
+                'ETag': '"abc123"',
+                'StorageClass': 'STANDARD',
+                'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
+                'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
+                'Size': 14814
+            }],
+            'EncodingType': 'url',
+            'ResponseMetadata': {
+                'RequestId': 'abc123',
+                'HTTPStatusCode': 200,
+                'HostId': 'abc123'
+            },
+            'Marker': ''
+        }
+
+        expected_params = {'Bucket': 'test-bucket'}
+
+        stubber.add_response('list_objects', response, expected_params)
+        stubber.activate()
+
+        service_response = s3.list_objects(Bucket='test-bucket')
+        assert service_response == response
+
+
+    This class can also be called as a context manager, which will handle
+    activation / deactivation for you.
+
+    **Example:**
+    ::
+        import datetime
+        import botocore.session
+        from botocore.stub import Stubber
+
+
+        s3 = botocore.session.get_session().create_client('s3')
+
+        response = {
+            "Owner": {
+                "ID": "foo",
+                "DisplayName": "bar"
+            },
+            "Buckets": [{
+                "CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
+                "Name": "baz"
+            }]
+        }
+
+
+        with Stubber(s3) as stubber:
+            stubber.add_response('list_buckets', response, {})
+            service_response = s3.list_buckets()
+
+        assert service_response == response
+
+
+    If you have an input parameter that is a randomly generated value, or you
+    otherwise don't care about its value, you can use ``stub.ANY`` to ignore
+    it in validation.
+
+    **Example:**
+    ::
+        import datetime
+        import botocore.session
+        from botocore.stub import Stubber, ANY
+
+
+        s3 = botocore.session.get_session().create_client('s3')
+        stubber = Stubber(s3)
+
+        response = {
+            'IsTruncated': False,
+            'Name': 'test-bucket',
+            'MaxKeys': 1000, 'Prefix': '',
+            'Contents': [{
+                'Key': 'test.txt',
+                'ETag': '"abc123"',
+                'StorageClass': 'STANDARD',
+                'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
+                'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
+                'Size': 14814
+            }],
+            'EncodingType': 'url',
+            'ResponseMetadata': {
+                'RequestId': 'abc123',
+                'HTTPStatusCode': 200,
+                'HostId': 'abc123'
+            },
+            'Marker': ''
+        }
+
+        expected_params = {'Bucket': ANY}
+        stubber.add_response('list_objects', response, expected_params)
+
+        with stubber:
+            service_response = s3.list_objects(Bucket='test-bucket')
+
+        assert service_response == response
+    """
+
+    def __init__(self, client):
+        """
+        :param client: The client to add your stubs to.
+        """
+        self.client = client
+        self._event_id = 'boto_stubber'
+        self._expected_params_event_id = 'boto_stubber_expected_params'
+        self._stub_account_id_event_id = 'boto_stubber_stub_account_id'
+        self._queue = deque()
+
+    def __enter__(self):
+        self.activate()
+        return self
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        self.deactivate()
+
+    def activate(self):
+        """
+        Activates the stubber on the client
+        """
+        self.client.meta.events.register_first(
+            'before-parameter-build.*.*',
+            self._assert_expected_params,
+            unique_id=self._expected_params_event_id,
+        )
+        self.client.meta.events.register(
+            'before-call.*.*',
+            self._get_response_handler,
+            unique_id=self._event_id,
+        )
+        self.client.meta.events.register(
+            'before-endpoint-resolution.*',
+            self._set_account_id_for_endpoint_resolution,
+            unique_id=self._stub_account_id_event_id,
+        )
+
+    def deactivate(self):
+        """
+        Deactivates the stubber on the client
+        """
+        self.client.meta.events.unregister(
+            'before-parameter-build.*.*',
+            self._assert_expected_params,
+            unique_id=self._expected_params_event_id,
+        )
+        self.client.meta.events.unregister(
+            'before-call.*.*',
+            self._get_response_handler,
+            unique_id=self._event_id,
+        )
+        self.client.meta.events.unregister(
+            'before-endpoint-resolution.*',
+            self._set_account_id_for_endpoint_resolution,
+            unique_id=self._stub_account_id_event_id,
+        )
+
+    def add_response(self, method, service_response, expected_params=None):
+        """
+        Adds a service response to the response queue. This will be validated
+        against the service model to ensure correctness. It should be noted,
+        however, that while missing attributes are often considered correct,
+        your code may not function properly if you leave them out. Therefore
+        you should always fill in every value you see in a typical response for
+        your particular request.
+
+        :param method: The name of the client method to stub.
+        :type method: str
+
+        :param service_response: A dict response stub. Provided parameters will
+            be validated against the service model.
+        :type service_response: dict
+
+        :param expected_params: A dictionary of the expected parameters to
+            be called for the provided service response. The parameters match
+            the names of keyword arguments passed to that client call. If
+            any of the parameters differ a ``StubResponseError`` is thrown.
+            You can use stub.ANY to indicate a particular parameter to ignore
+            in validation. stub.ANY is only valid for top level params.
+        """
+        self._add_response(method, service_response, expected_params)
+
+    def _add_response(self, method, service_response, expected_params):
+        if not hasattr(self.client, method):
+            raise ValueError(
+                f"Client {self.client.meta.service_model.service_name} "
+                f"does not have method: {method}"
+            )
+
+        # Create a successful http response
+        http_response = AWSResponse(None, 200, {}, None)
+
+        operation_name = self.client.meta.method_to_api_mapping.get(method)
+        self._validate_operation_response(operation_name, service_response)
+
+        # Add the service_response to the queue for returning responses
+        response = {
+            'operation_name': operation_name,
+            'response': (http_response, service_response),
+            'expected_params': expected_params,
+        }
+        self._queue.append(response)
+
+    def add_client_error(
+        self,
+        method,
+        service_error_code='',
+        service_message='',
+        http_status_code=400,
+        service_error_meta=None,
+        expected_params=None,
+        response_meta=None,
+        modeled_fields=None,
+    ):
+        """
+        Adds a ``ClientError`` to the response queue.
+ + :param method: The name of the service method to return the error on. + :type method: str + + :param service_error_code: The service error code to return, + e.g. ``NoSuchBucket`` + :type service_error_code: str + + :param service_message: The service message to return, e.g. + 'The specified bucket does not exist.' + :type service_message: str + + :param http_status_code: The HTTP status code to return, e.g. 404, etc + :type http_status_code: int + + :param service_error_meta: Additional keys to be added to the + service Error + :type service_error_meta: dict + + :param expected_params: A dictionary of the expected parameters to + be called for the provided service response. The parameters match + the names of keyword arguments passed to that client call. If + any of the parameters differ a ``StubResponseError`` is thrown. + You can use stub.ANY to indicate a particular parameter to ignore + in validation. + + :param response_meta: Additional keys to be added to the + response's ResponseMetadata + :type response_meta: dict + + :param modeled_fields: Additional keys to be added to the response + based on fields that are modeled for the particular error code. + These keys will be validated against the particular error shape + designated by the error code. + :type modeled_fields: dict + + """ + http_response = AWSResponse(None, http_status_code, {}, None) + + # We don't look to the model to build this because the caller would + # need to know the details of what the HTTP body would need to + # look like. + parsed_response = { + 'ResponseMetadata': {'HTTPStatusCode': http_status_code}, + 'Error': {'Message': service_message, 'Code': service_error_code}, + } + + if service_error_meta is not None: + parsed_response['Error'].update(service_error_meta) + + if response_meta is not None: + parsed_response['ResponseMetadata'].update(response_meta) + + if modeled_fields is not None: + service_model = self.client.meta.service_model + shape = service_model.shape_for_error_code(service_error_code) + self._validate_response(shape, modeled_fields) + parsed_response.update(modeled_fields) + + operation_name = self.client.meta.method_to_api_mapping.get(method) + # Note that we do not allow for expected_params while + # adding errors into the queue yet. + response = { + 'operation_name': operation_name, + 'response': (http_response, parsed_response), + 'expected_params': expected_params, + } + self._queue.append(response) + + def assert_no_pending_responses(self): + """ + Asserts that all expected calls were made. + """ + remaining = len(self._queue) + if remaining != 0: + raise AssertionError(f"{remaining} responses remaining in queue.") + + def _assert_expected_call_order(self, model, params): + if not self._queue: + raise UnStubbedResponseError( + operation_name=model.name, + reason=( + 'Unexpected API Call: A call was made but no additional ' + 'calls expected. Either the API Call was not stubbed or ' + 'it was called multiple times.' + ), + ) + + name = self._queue[0]['operation_name'] + if name != model.name: + raise StubResponseError( + operation_name=model.name, + reason=f'Operation mismatch: found response for {name}.', + ) + + def _set_account_id_for_endpoint_resolution(self, builtins, **kwargs): + # Account ID comes from credentials and will try to resolve on endpoint resolution + # when it's a builtin. This breaks any stubber in environments where credentials + # are not available. We mock it to be a None value so that we don't attempt to + # resolve credentials. 
+ if 'AWS::Auth::AccountId' in builtins: + builtins['AWS::Auth::AccountId'] = None + + def _get_response_handler(self, model, params, context, **kwargs): + self._assert_expected_call_order(model, params) + # Pop off the entire response once everything has been validated + return self._queue.popleft()['response'] + + def _assert_expected_params(self, model, params, context, **kwargs): + if self._should_not_stub(context): + return + self._assert_expected_call_order(model, params) + expected_params = self._queue[0]['expected_params'] + if expected_params is None: + return + + # Validate the parameters are equal + for param, value in expected_params.items(): + if param not in params or expected_params[param] != params[param]: + raise StubAssertionError( + operation_name=model.name, + reason=( + f'Expected parameters:\n{pformat(expected_params)},\n' + f'but received:\n{pformat(params)}' + ), + ) + + # Ensure there are no extra params hanging around + if sorted(expected_params.keys()) != sorted(params.keys()): + raise StubAssertionError( + operation_name=model.name, + reason=( + f'Expected parameters:\n{pformat(expected_params)},\n' + f'but received:\n{pformat(params)}' + ), + ) + + def _should_not_stub(self, context): + # Do not include presign requests when processing stubbed client calls + # as a presign request will never have an HTTP request sent over the + # wire for it and therefore not receive a response back. + if context and context.get('is_presign_request'): + return True + + def _validate_operation_response(self, operation_name, service_response): + service_model = self.client.meta.service_model + operation_model = service_model.operation_model(operation_name) + output_shape = operation_model.output_shape + + # Remove ResponseMetadata so that the validator doesn't attempt to + # perform validation on it. + response = service_response + if 'ResponseMetadata' in response: + response = copy.copy(service_response) + del response['ResponseMetadata'] + + self._validate_response(output_shape, response) + + def _validate_response(self, shape, response): + if shape is not None: + validate_parameters(response, shape) + elif response: + # If the output shape is None, that means the response should be + # empty apart from ResponseMetadata + raise ParamValidationError( + report=( + "Service response should only contain ResponseMetadata." + ) + ) diff --git a/py311/lib/python3.11/site-packages/botocore/tokens.py b/py311/lib/python3.11/site-packages/botocore/tokens.py new file mode 100644 index 0000000000000000000000000000000000000000..d90861823112a17f99db2ce286780f38ce4865b5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/tokens.py @@ -0,0 +1,363 @@ +# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
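+#
+# A minimal illustrative sketch of resolving a bearer token through the
+# provider chain defined below (not part of the original module; the session
+# setup and the ``signing_name`` value are assumptions for the example):
+#
+#     import botocore.session
+#     from botocore.tokens import create_token_resolver
+#
+#     session = botocore.session.get_session()
+#     resolver = create_token_resolver(session)
+#     token = resolver.load_token(signing_name='example-service')
+#     # ScopedEnvTokenProvider returns a FrozenAuthToken directly, while
+#     # SSOTokenProvider returns a DeferredRefreshableToken that refreshes
+#     # lazily via get_frozen_token().
+#     if token is not None and hasattr(token, 'get_frozen_token'):
+#         token = token.get_frozen_token()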
+import json +import logging +import os +import threading +from datetime import datetime, timedelta +from typing import NamedTuple, Optional + +import dateutil.parser +from dateutil.tz import tzutc + +from botocore import UNSIGNED +from botocore.compat import total_seconds +from botocore.config import Config +from botocore.exceptions import ( + ClientError, + InvalidConfigError, + TokenRetrievalError, +) +from botocore.utils import ( + CachedProperty, + JSONFileCache, + SSOTokenLoader, + create_nested_client, + get_token_from_environment, +) + +logger = logging.getLogger(__name__) + + +def _utc_now(): + return datetime.now(tzutc()) + + +def create_token_resolver(session): + providers = [ + ScopedEnvTokenProvider(session), + SSOTokenProvider(session), + ] + return TokenProviderChain(providers=providers) + + +def _serialize_utc_timestamp(obj): + if isinstance(obj, datetime): + return obj.strftime("%Y-%m-%dT%H:%M:%SZ") + return obj + + +def _sso_json_dumps(obj): + return json.dumps(obj, default=_serialize_utc_timestamp) + + +class FrozenAuthToken(NamedTuple): + token: str + expiration: Optional[datetime] = None + + +class DeferredRefreshableToken: + # The time at which we'll attempt to refresh, but not block if someone else + # is refreshing. + _advisory_refresh_timeout = 15 * 60 + # The time at which all threads will block waiting for a refreshed token + _mandatory_refresh_timeout = 10 * 60 + # Refresh at most once every minute to avoid blocking every request + _attempt_timeout = 60 + + def __init__(self, method, refresh_using, time_fetcher=_utc_now): + self._time_fetcher = time_fetcher + self._refresh_using = refresh_using + self.method = method + + # The frozen token is protected by this lock + self._refresh_lock = threading.Lock() + self._frozen_token = None + self._next_refresh = None + + def get_frozen_token(self): + self._refresh() + return self._frozen_token + + def _refresh(self): + # If we don't need to refresh just return + refresh_type = self._should_refresh() + if not refresh_type: + return None + + # Block for refresh if we're in the mandatory refresh window + block_for_refresh = refresh_type == "mandatory" + if self._refresh_lock.acquire(block_for_refresh): + try: + self._protected_refresh() + finally: + self._refresh_lock.release() + + def _protected_refresh(self): + # This should only be called after acquiring the refresh lock + # Another thread may have already refreshed, double check refresh + refresh_type = self._should_refresh() + if not refresh_type: + return None + + try: + now = self._time_fetcher() + self._next_refresh = now + timedelta(seconds=self._attempt_timeout) + self._frozen_token = self._refresh_using() + except Exception: + logger.warning( + "Refreshing token failed during the %s refresh period.", + refresh_type, + exc_info=True, + ) + if refresh_type == "mandatory": + # This refresh was mandatory, error must be propagated back + raise + + if self._is_expired(): + # Fresh credentials should never be expired + raise TokenRetrievalError( + provider=self.method, + error_msg="Token has expired and refresh failed", + ) + + def _is_expired(self): + if self._frozen_token is None: + return False + + expiration = self._frozen_token.expiration + remaining = total_seconds(expiration - self._time_fetcher()) + return remaining <= 0 + + def _should_refresh(self): + if self._frozen_token is None: + # We don't have a token yet, mandatory refresh + return "mandatory" + + expiration = self._frozen_token.expiration + if expiration is None: + # No expiration, so assume we don't 
need to refresh. + return None + + now = self._time_fetcher() + if now < self._next_refresh: + return None + + remaining = total_seconds(expiration - now) + + if remaining < self._mandatory_refresh_timeout: + return "mandatory" + elif remaining < self._advisory_refresh_timeout: + return "advisory" + + return None + + +class TokenProviderChain: + def __init__(self, providers=None): + if providers is None: + providers = [] + self._providers = providers + + def load_token(self, **kwargs): + for provider in self._providers: + token = provider.load_token(**kwargs) + if token is not None: + return token + return None + + +class SSOTokenProvider: + METHOD = "sso" + _REFRESH_WINDOW = 15 * 60 + _SSO_TOKEN_CACHE_DIR = os.path.expanduser( + os.path.join("~", ".aws", "sso", "cache") + ) + _SSO_CONFIG_VARS = [ + "sso_start_url", + "sso_region", + ] + _GRANT_TYPE = "refresh_token" + DEFAULT_CACHE_CLS = JSONFileCache + + def __init__( + self, session, cache=None, time_fetcher=_utc_now, profile_name=None + ): + self._session = session + if cache is None: + cache = self.DEFAULT_CACHE_CLS( + self._SSO_TOKEN_CACHE_DIR, + dumps_func=_sso_json_dumps, + ) + self._now = time_fetcher + self._cache = cache + self._token_loader = SSOTokenLoader(cache=self._cache) + self._profile_name = ( + profile_name + or self._session.get_config_variable("profile") + or 'default' + ) + + def _load_sso_config(self): + loaded_config = self._session.full_config + profiles = loaded_config.get("profiles", {}) + sso_sessions = loaded_config.get("sso_sessions", {}) + profile_config = profiles.get(self._profile_name, {}) + + if "sso_session" not in profile_config: + return + + sso_session_name = profile_config["sso_session"] + sso_config = sso_sessions.get(sso_session_name, None) + + if not sso_config: + error_msg = ( + f'The profile "{self._profile_name}" is configured to use the SSO ' + f'token provider but the "{sso_session_name}" sso_session ' + f"configuration does not exist." + ) + raise InvalidConfigError(error_msg=error_msg) + + missing_configs = [] + for var in self._SSO_CONFIG_VARS: + if var not in sso_config: + missing_configs.append(var) + + if missing_configs: + error_msg = ( + f'The profile "{self._profile_name}" is configured to use the SSO ' + f"token provider but is missing the following configuration: " + f"{missing_configs}." 
+ ) + raise InvalidConfigError(error_msg=error_msg) + + return { + "session_name": sso_session_name, + "sso_region": sso_config["sso_region"], + "sso_start_url": sso_config["sso_start_url"], + } + + @CachedProperty + def _sso_config(self): + return self._load_sso_config() + + @CachedProperty + def _client(self): + config = Config( + region_name=self._sso_config["sso_region"], + signature_version=UNSIGNED, + ) + return create_nested_client(self._session, "sso-oidc", config=config) + + def _attempt_create_token(self, token): + response = self._client.create_token( + grantType=self._GRANT_TYPE, + clientId=token["clientId"], + clientSecret=token["clientSecret"], + refreshToken=token["refreshToken"], + ) + expires_in = timedelta(seconds=response["expiresIn"]) + new_token = { + "startUrl": self._sso_config["sso_start_url"], + "region": self._sso_config["sso_region"], + "accessToken": response["accessToken"], + "expiresAt": self._now() + expires_in, + # Cache the registration alongside the token + "clientId": token["clientId"], + "clientSecret": token["clientSecret"], + "registrationExpiresAt": token["registrationExpiresAt"], + } + if "refreshToken" in response: + new_token["refreshToken"] = response["refreshToken"] + logger.info("SSO Token refresh succeeded") + return new_token + + def _refresh_access_token(self, token): + keys = ( + "refreshToken", + "clientId", + "clientSecret", + "registrationExpiresAt", + ) + missing_keys = [k for k in keys if k not in token] + if missing_keys: + msg = f"Unable to refresh SSO token: missing keys: {missing_keys}" + logger.info(msg) + return None + + expiry = dateutil.parser.parse(token["registrationExpiresAt"]) + if total_seconds(expiry - self._now()) <= 0: + logger.info("SSO token registration expired at %s", expiry) + return None + + try: + return self._attempt_create_token(token) + except ClientError: + logger.warning("SSO token refresh attempt failed", exc_info=True) + return None + + def _refresher(self): + start_url = self._sso_config["sso_start_url"] + session_name = self._sso_config["session_name"] + logger.info("Loading cached SSO token for %s", session_name) + token_dict = self._token_loader(start_url, session_name=session_name) + expiration = dateutil.parser.parse(token_dict["expiresAt"]) + logger.debug("Cached SSO token expires at %s", expiration) + + remaining = total_seconds(expiration - self._now()) + if remaining < self._REFRESH_WINDOW: + new_token_dict = self._refresh_access_token(token_dict) + if new_token_dict is not None: + token_dict = new_token_dict + expiration = token_dict["expiresAt"] + self._token_loader.save_token( + start_url, token_dict, session_name=session_name + ) + + return FrozenAuthToken( + token_dict["accessToken"], expiration=expiration + ) + + def load_token(self, **kwargs): + if self._sso_config is None: + return None + + return DeferredRefreshableToken( + self.METHOD, self._refresher, time_fetcher=self._now + ) + + +class ScopedEnvTokenProvider: + """ + Token provider that loads tokens from environment variables scoped to + a specific `signing_name`. 
+    """
+
+    METHOD = 'env'
+
+    def __init__(self, session, environ=None):
+        self._session = session
+        if environ is None:
+            environ = os.environ
+        self.environ = environ
+
+    def load_token(self, **kwargs):
+        signing_name = kwargs.get("signing_name")
+        if signing_name is None:
+            return None
+
+        token = get_token_from_environment(signing_name, self.environ)
+
+        if token is not None:
+            logger.info("Found token in environment variables.")
+            return FrozenAuthToken(token)
diff --git a/py311/lib/python3.11/site-packages/botocore/translate.py b/py311/lib/python3.11/site-packages/botocore/translate.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecfe3bcaf46629a3d75bef81a60ac45e76cfa4db
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/translate.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
+# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import copy
+
+from botocore.utils import merge_dicts
+
+
+def build_retry_config(
+    endpoint_prefix, retry_model, definitions, client_retry_config=None
+):
+    service_config = retry_model.get(endpoint_prefix, {})
+    resolve_references(service_config, definitions)
+    # We want to merge the global defaults with the service specific
+    # defaults, with the service specific defaults taking precedence.
+    # So we use the global defaults as the base.
+    #
+    # A deepcopy is done on the retry defaults because it ensures the
+    # retry model has no chance of getting mutated when the service specific
+    # configuration or client retry config is merged in.
+    final_retry_config = {
+        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
+    }
+    resolve_references(final_retry_config, definitions)
+    # Then merge the service specific config on top.
+    merge_dicts(final_retry_config, service_config)
+    if client_retry_config is not None:
+        _merge_client_retry_config(final_retry_config, client_retry_config)
+    return final_retry_config
+
+
+def _merge_client_retry_config(retry_config, client_retry_config):
+    max_retry_attempts_override = client_retry_config.get('max_attempts')
+    if max_retry_attempts_override is not None:
+        # In the retry config, max_attempts refers to the maximum number of
+        # requests that will be made in total. However, for the client's
+        # retry config it refers to how many retry attempts will be made at
+        # most. So to translate this number from the client config, one is
+        # added to convert it to the maximum number of requests that will be
+        # made by including the initial request.
+        #
+        # It is also important to note that if we ever support per operation
+        # configuration in the retry model via the client, we will need to
+        # revisit this logic to make sure max_attempts gets applied
+        # per operation.
+        retry_config['__default__']['max_attempts'] = (
+            max_retry_attempts_override + 1
+        )
+
+
+def resolve_references(config, definitions):
+    """Recursively replace $ref keys.
+
+    To cut down on duplication, common definitions can be declared
+    (and passed in via the ``definitions`` attribute) and then
+    referenced as ``{"$ref": "name"}``; when this happens, the reference
+    dict is replaced with the value from the ``definitions`` dict.
+
+    This is recursively done.
+
+    """
+    for key, value in config.items():
+        if isinstance(value, dict):
+            if len(value) == 1 and list(value.keys())[0] == '$ref':
+                # Then we need to resolve this reference.
+                config[key] = definitions[list(value.values())[0]]
+            else:
+                resolve_references(value, definitions)
diff --git a/py311/lib/python3.11/site-packages/botocore/useragent.py b/py311/lib/python3.11/site-packages/botocore/useragent.py
new file mode 100644
index 0000000000000000000000000000000000000000..9054728efd80d3f562392c17a3702fa58e160361
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/useragent.py
@@ -0,0 +1,672 @@
+# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+"""
+NOTE: All classes and functions in this module are considered private and are
+subject to abrupt breaking changes. Please do not use them directly.
+
+To modify the User-Agent header sent by botocore, use one of these
+configuration options:
+* The ``AWS_SDK_UA_APP_ID`` environment variable.
+* The ``sdk_ua_app_id`` setting in the shared AWS config file.
+* The ``user_agent_appid`` field in the :py:class:`botocore.config.Config`.
+* The ``user_agent_extra`` field in the :py:class:`botocore.config.Config`.
+
+"""
+
+import logging
+import os
+import platform
+from copy import copy
+from string import ascii_letters, digits
+from typing import NamedTuple, Optional
+
+from botocore import __version__ as botocore_version
+from botocore.compat import HAS_CRT
+from botocore.context import get_context
+
+logger = logging.getLogger(__name__)
+
+
+_USERAGENT_ALLOWED_CHARACTERS = ascii_letters + digits + "!$%&'*+-.^_`|~,"
+_USERAGENT_ALLOWED_OS_NAMES = (
+    'windows',
+    'linux',
+    'macos',
+    'android',
+    'ios',
+    'watchos',
+    'tvos',
+    'other',
+)
+_USERAGENT_PLATFORM_NAME_MAPPINGS = {'darwin': 'macos'}
+# The name by which botocore is identified in the User-Agent header. While most
+# AWS SDKs follow a naming pattern of "aws-sdk-*", botocore and boto3 continue
+# using their existing values. Uses uppercase "B" with all other characters
+# lowercase.
+_USERAGENT_SDK_NAME = 'Botocore'
+_USERAGENT_FEATURE_MAPPINGS = {
+    'WAITER': 'B',
+    'PAGINATOR': 'C',
+    'RETRY_MODE_LEGACY': 'D',
+    'RETRY_MODE_STANDARD': 'E',
+    'RETRY_MODE_ADAPTIVE': 'F',
+    'S3_TRANSFER': 'G',
+    'GZIP_REQUEST_COMPRESSION': 'L',
+    'PROTOCOL_RPC_V2_CBOR': 'M',
+    'ENDPOINT_OVERRIDE': 'N',
+    'ACCOUNT_ID_MODE_PREFERRED': 'P',
+    'ACCOUNT_ID_MODE_DISABLED': 'Q',
+    'ACCOUNT_ID_MODE_REQUIRED': 'R',
+    'SIGV4A_SIGNING': 'S',
+    'RESOLVED_ACCOUNT_ID': 'T',
+    'FLEXIBLE_CHECKSUMS_REQ_CRC32': 'U',
+    'FLEXIBLE_CHECKSUMS_REQ_CRC32C': 'V',
+    'FLEXIBLE_CHECKSUMS_REQ_CRC64': 'W',
+    'FLEXIBLE_CHECKSUMS_REQ_SHA1': 'X',
+    'FLEXIBLE_CHECKSUMS_REQ_SHA256': 'Y',
+    'FLEXIBLE_CHECKSUMS_REQ_WHEN_SUPPORTED': 'Z',
+    'FLEXIBLE_CHECKSUMS_REQ_WHEN_REQUIRED': 'a',
+    'FLEXIBLE_CHECKSUMS_RES_WHEN_SUPPORTED': 'b',
+    'FLEXIBLE_CHECKSUMS_RES_WHEN_REQUIRED': 'c',
+    'CREDENTIALS_CODE': 'e',
+    'CREDENTIALS_ENV_VARS': 'g',
+    'CREDENTIALS_ENV_VARS_STS_WEB_ID_TOKEN': 'h',
+    'CREDENTIALS_STS_ASSUME_ROLE': 'i',
+    'CREDENTIALS_STS_ASSUME_ROLE_WEB_ID': 'k',
+    'CREDENTIALS_PROFILE': 'n',
+    'CREDENTIALS_PROFILE_SOURCE_PROFILE': 'o',
+    'CREDENTIALS_PROFILE_NAMED_PROVIDER': 'p',
+    'CREDENTIALS_PROFILE_STS_WEB_ID_TOKEN': 'q',
+    'CREDENTIALS_PROFILE_SSO': 'r',
+    'CREDENTIALS_SSO': 's',
+    'CREDENTIALS_PROFILE_SSO_LEGACY': 't',
+    'CREDENTIALS_SSO_LEGACY': 'u',
+    'CREDENTIALS_PROFILE_PROCESS': 'v',
+    'CREDENTIALS_PROCESS': 'w',
+    'CREDENTIALS_BOTO2_CONFIG_FILE': 'x',
+    'CREDENTIALS_HTTP': 'z',
+    'CREDENTIALS_IMDS': '0',
+    'BEARER_SERVICE_ENV_VARS': '3',
+    'CLI_V1_TO_V2_MIGRATION_DEBUG_MODE': '-',
+    'CREDENTIALS_PROFILE_LOGIN': 'AC',
+    'CREDENTIALS_LOGIN': 'AD',
+}
+
+
+def register_feature_id(feature_id):
+    """Adds metric value to the current context object's ``features`` set.
+
+    :type feature_id: str
+    :param feature_id: The name of the feature to register. Value must be a key
+        in the ``_USERAGENT_FEATURE_MAPPINGS`` dict.
+    """
+    ctx = get_context()
+    if ctx is None:
+        # Never register features outside the scope of a
+        # ``botocore.context.start_as_current_context`` context manager.
+        # Otherwise, the context variable won't be reset and features will
+        # bleed into all subsequent requests. Return instead of raising an
+        # exception since this function could be invoked in a public interface.
+        return
+    if val := _USERAGENT_FEATURE_MAPPINGS.get(feature_id):
+        ctx.features.add(val)
+
+
+def register_feature_ids(feature_ids):
+    """Adds multiple feature IDs to the current context object's ``features`` set.
+
+    :type feature_ids: iterable of str
+    :param feature_ids: An iterable of feature ID strings to register. Each
+        value must be a key in the ``_USERAGENT_FEATURE_MAPPINGS`` dict.
+    """
+    for feature_id in feature_ids:
+        register_feature_id(feature_id)
+
+
+def sanitize_user_agent_string_component(raw_str, allow_hash):
+    """Replaces all disallowed characters in the string with a dash ("-").
+
+    Allowed characters are ASCII alphanumerics and ``!$%&'*+-.^_`|~,``. If
+    ``allow_hash`` is ``True``, "#" is also allowed.
+
+    :type raw_str: str
+    :param raw_str: The input string to be sanitized.
+
+    :type allow_hash: bool
+    :param allow_hash: Whether "#" is considered an allowed character.
+    """
+    return ''.join(
+        c
+        if c in _USERAGENT_ALLOWED_CHARACTERS or (allow_hash and c == '#')
+        else '-'
+        for c in raw_str
+    )
+
+
+class UserAgentComponentSizeConfig:
+    """
+    Configures the max size of a built user agent string component and the
+    delimiter used to truncate the string if the size is above the max.
+ """ + + def __init__(self, max_size_in_bytes: int, delimiter: str): + self.max_size_in_bytes = max_size_in_bytes + self.delimiter = delimiter + self._validate_input() + + def _validate_input(self): + if self.max_size_in_bytes < 1: + raise ValueError( + f'Invalid `max_size_in_bytes`: {self.max_size_in_bytes}. ' + 'Value must be a positive integer.' + ) + + +class UserAgentComponent(NamedTuple): + """ + Component of a Botocore User-Agent header string in the standard format. + + Each component consists of a prefix, a name, a value, and a size_config. + In the string representation these are combined in the format + ``prefix/name#value``. + + ``size_config`` configures the max size and truncation strategy for the + built user agent string component. + + This class is considered private and is subject to abrupt breaking changes. + """ + + prefix: str + name: str + value: Optional[str] = None + size_config: Optional[UserAgentComponentSizeConfig] = None + + def to_string(self): + """Create string like 'prefix/name#value' from a UserAgentComponent.""" + clean_prefix = sanitize_user_agent_string_component( + self.prefix, allow_hash=True + ) + clean_name = sanitize_user_agent_string_component( + self.name, allow_hash=False + ) + if self.value is None or self.value == '': + clean_string = f'{clean_prefix}/{clean_name}' + else: + clean_value = sanitize_user_agent_string_component( + self.value, allow_hash=True + ) + clean_string = f'{clean_prefix}/{clean_name}#{clean_value}' + if self.size_config is not None: + clean_string = self._truncate_string( + clean_string, + self.size_config.max_size_in_bytes, + self.size_config.delimiter, + ) + return clean_string + + def _truncate_string(self, string, max_size, delimiter): + """ + Pop ``delimiter``-separated values until encoded string is less than or + equal to ``max_size``. + """ + orig = string + while len(string.encode('utf-8')) > max_size: + parts = string.split(delimiter) + parts.pop() + string = delimiter.join(parts) + + if string == '': + logger.debug( + "User agent component `%s` could not be truncated to " + "`%s` bytes with delimiter " + "`%s` without losing all contents. " + "Value will be omitted from user agent string.", + orig, + max_size, + delimiter, + ) + return string + + +class RawStringUserAgentComponent: + """ + UserAgentComponent interface wrapper around ``str``. + + Use for User-Agent header components that are not constructed from + prefix+name+value but instead are provided as strings. No sanitization is + performed. + """ + + def __init__(self, value): + self._value = value + + def to_string(self): + return self._value + + +# This is not a public interface and is subject to abrupt breaking changes. +# Any usage is not advised or supported in external code bases. +try: + from botocore.customizations.useragent import modify_components +except ImportError: + # Default implementation that returns unmodified User-Agent components. + def modify_components(components): + return components + + +class UserAgentString: + """ + Generator for AWS SDK User-Agent header strings. + + The User-Agent header format contains information from session, client, and + request context. ``UserAgentString`` provides methods for collecting the + information and ``to_string`` for assembling it into the standardized + string format. + + Example usage: + + ua_session = UserAgentString.from_environment() + ua_session.set_session_config(...) 
+        ua_client = ua_session.with_client_config(Config(...))
+        ua_string = ua_client.to_string()
+
+    For testing or when information from all sources is available at the same
+    time, the methods can be chained:
+
+        ua_string = (
+            UserAgentString
+            .from_environment()
+            .set_session_config(...)
+            .with_client_config(Config(...))
+            .to_string()
+        )
+
+    """
+
+    def __init__(
+        self,
+        platform_name,
+        platform_version,
+        platform_machine,
+        python_version,
+        python_implementation,
+        execution_env,
+        crt_version=None,
+    ):
+        """
+        :type platform_name: str
+        :param platform_name: Name of the operating system or equivalent
+            platform name. Should be sourced from :py:meth:`platform.system`.
+        :type platform_version: str
+        :param platform_version: Version of the operating system or equivalent
+            platform name. Should be sourced from :py:meth:`platform.release`.
+        :type platform_machine: str
+        :param platform_machine: Processor architecture or machine type. For
+            example "x86_64". Should be sourced from :py:meth:`platform.machine`.
+        :type python_version: str
+        :param python_version: Version of the python implementation as str.
+            Should be sourced from :py:meth:`platform.python_version`.
+        :type python_implementation: str
+        :param python_implementation: Name of the python implementation.
+            Should be sourced from :py:meth:`platform.python_implementation`.
+        :type execution_env: str
+        :param execution_env: The value of the AWS execution environment.
+            Should be sourced from the ``AWS_EXECUTION_ENV`` environment
+            variable.
+        :type crt_version: str
+        :param crt_version: Version string of awscrt package, if installed.
+        """
+        self._platform_name = platform_name
+        self._platform_version = platform_version
+        self._platform_machine = platform_machine
+        self._python_version = python_version
+        self._python_implementation = python_implementation
+        self._execution_env = execution_env
+        self._crt_version = crt_version
+
+        # Components that can be added with ``set_session_config()``
+        self._session_user_agent_name = None
+        self._session_user_agent_version = None
+        self._session_user_agent_extra = None
+
+        self._client_config = None
+
+        # Component that can be set with ``set_client_features()``
+        self._client_features = None
+
+    @classmethod
+    def from_environment(cls):
+        crt_version = None
+        if HAS_CRT:
+            crt_version = _get_crt_version() or 'Unknown'
+        return cls(
+            platform_name=platform.system(),
+            platform_version=platform.release(),
+            platform_machine=platform.machine(),
+            python_version=platform.python_version(),
+            python_implementation=platform.python_implementation(),
+            execution_env=os.environ.get('AWS_EXECUTION_ENV'),
+            crt_version=crt_version,
+        )
+
+    def set_session_config(
+        self,
+        session_user_agent_name,
+        session_user_agent_version,
+        session_user_agent_extra,
+    ):
+        """
+        Set the user agent configuration values that apply at session level.
+
+        :param session_user_agent_name: The user agent name configured in the
+            :py:class:`botocore.session.Session` object. For backwards
+            compatibility, this will always be at the beginning of the
+            User-Agent string, together with ``session_user_agent_version``.
+        :param session_user_agent_version: The user agent version configured
+            in the :py:class:`botocore.session.Session` object.
+        :param session_user_agent_extra: The user agent "extra" configured in
+            the :py:class:`botocore.session.Session` object.
+ """ + self._session_user_agent_name = session_user_agent_name + self._session_user_agent_version = session_user_agent_version + self._session_user_agent_extra = session_user_agent_extra + return self + + def set_client_features(self, features): + """ + Persist client-specific features registered before or during client + creation. + + :type features: Set[str] + :param features: A set of client-specific features. + """ + self._client_features = features + + def with_client_config(self, client_config): + """ + Create a copy with all original values and client-specific values. + + :type client_config: botocore.config.Config + :param client_config: The client configuration object. + """ + cp = copy(self) + cp._client_config = client_config + return cp + + def to_string(self): + """ + Build User-Agent header string from the object's properties. + """ + config_ua_override = None + if self._client_config: + if hasattr(self._client_config, '_supplied_user_agent'): + config_ua_override = self._client_config._supplied_user_agent + else: + config_ua_override = self._client_config.user_agent + + if config_ua_override is not None: + return self._build_legacy_ua_string(config_ua_override) + + components = [ + *self._build_sdk_metadata(), + RawStringUserAgentComponent('ua/2.1'), + *self._build_os_metadata(), + *self._build_architecture_metadata(), + *self._build_language_metadata(), + *self._build_execution_env_metadata(), + *self._build_feature_metadata(), + *self._build_config_metadata(), + *self._build_app_id(), + *self._build_extra(), + ] + + components = modify_components(components) + + return ' '.join( + [comp.to_string() for comp in components if comp.to_string()] + ) + + def _build_sdk_metadata(self): + """ + Build the SDK name and version component of the User-Agent header. + + For backwards-compatibility both session-level and client-level config + of custom tool names are honored. If this removes the Botocore + information from the start of the string, Botocore's name and version + are included as a separate field with "md" prefix. + """ + sdk_md = [] + if ( + self._session_user_agent_name + and self._session_user_agent_version + and ( + self._session_user_agent_name != _USERAGENT_SDK_NAME + or self._session_user_agent_version != botocore_version + ) + ): + sdk_md.extend( + [ + UserAgentComponent( + self._session_user_agent_name, + self._session_user_agent_version, + ), + UserAgentComponent( + 'md', _USERAGENT_SDK_NAME, botocore_version + ), + ] + ) + else: + sdk_md.append( + UserAgentComponent(_USERAGENT_SDK_NAME, botocore_version) + ) + + if self._crt_version is not None: + sdk_md.append( + UserAgentComponent('md', 'awscrt', self._crt_version) + ) + + return sdk_md + + def _build_os_metadata(self): + """ + Build the OS/platform components of the User-Agent header string. + + For recognized platform names that match or map to an entry in the list + of standardized OS names, a single component with prefix "os" is + returned. Otherwise, one component "os/other" is returned and a second + with prefix "md" and the raw platform name. 
+ + String representations of example return values: + * ``os/macos#10.13.6`` + * ``os/linux`` + * ``os/other`` + * ``os/other md/foobar#1.2.3`` + """ + if self._platform_name is None: + return [UserAgentComponent('os', 'other')] + + plt_name_lower = self._platform_name.lower() + if plt_name_lower in _USERAGENT_ALLOWED_OS_NAMES: + os_family = plt_name_lower + elif plt_name_lower in _USERAGENT_PLATFORM_NAME_MAPPINGS: + os_family = _USERAGENT_PLATFORM_NAME_MAPPINGS[plt_name_lower] + else: + os_family = None + + if os_family is not None: + return [ + UserAgentComponent('os', os_family, self._platform_version) + ] + else: + return [ + UserAgentComponent('os', 'other'), + UserAgentComponent( + 'md', self._platform_name, self._platform_version + ), + ] + + def _build_architecture_metadata(self): + """ + Build architecture component of the User-Agent header string. + + Returns the machine type with prefix "md" and name "arch", if one is + available. Common values include "x86_64", "arm64", "i386". + """ + if self._platform_machine: + return [ + UserAgentComponent( + 'md', 'arch', self._platform_machine.lower() + ) + ] + return [] + + def _build_language_metadata(self): + """ + Build the language components of the User-Agent header string. + + Returns the Python version in a component with prefix "lang" and name + "python". The Python implementation (e.g. CPython, PyPy) is returned as + separate metadata component with prefix "md" and name "pyimpl". + + String representation of an example return value: + ``lang/python#3.10.4 md/pyimpl#CPython`` + """ + lang_md = [ + UserAgentComponent('lang', 'python', self._python_version), + ] + if self._python_implementation: + lang_md.append( + UserAgentComponent('md', 'pyimpl', self._python_implementation) + ) + return lang_md + + def _build_execution_env_metadata(self): + """ + Build the execution environment component of the User-Agent header. + + Returns a single component prefixed with "exec-env", usually sourced + from the environment variable AWS_EXECUTION_ENV. + """ + if self._execution_env: + return [UserAgentComponent('exec-env', self._execution_env)] + else: + return [] + + def _build_feature_metadata(self): + """ + Build the features component of the User-Agent header string. + + Returns a single component with prefix "m" followed by a list of + comma-separated metric values. + """ + ctx = get_context() + context_features = set() if ctx is None else ctx.features + client_features = self._client_features or set() + features = client_features.union(context_features) + if not features: + return [] + size_config = UserAgentComponentSizeConfig(1024, ',') + return [ + UserAgentComponent( + 'm', ','.join(features), size_config=size_config + ) + ] + + def _build_config_metadata(self): + """ + Build the configuration components of the User-Agent header string. + + Returns a list of components with prefix "cfg" followed by the config + setting name and its value. Tracked configuration settings may be + added or removed in future versions. + """ + if not self._client_config or not self._client_config.retries: + return [] + retry_mode = self._client_config.retries.get('mode') + cfg_md = [UserAgentComponent('cfg', 'retry-mode', retry_mode)] + if self._client_config.endpoint_discovery_enabled: + cfg_md.append(UserAgentComponent('cfg', 'endpoint-discovery')) + return cfg_md + + def _build_app_id(self): + """ + Build app component of the User-Agent header string. 
+
+        Returns a single component with prefix "app" and value sourced from the
+        ``user_agent_appid`` field in :py:class:`botocore.config.Config`, the
+        ``sdk_ua_app_id`` setting in the shared configuration file, or the
+        ``AWS_SDK_UA_APP_ID`` environment variable. These are the recommended
+        ways for apps built with Botocore to insert their identifier into the
+        User-Agent header.
+        """
+        if self._client_config and self._client_config.user_agent_appid:
+            appid = sanitize_user_agent_string_component(
+                raw_str=self._client_config.user_agent_appid, allow_hash=True
+            )
+            return [RawStringUserAgentComponent(f'app/{appid}')]
+        else:
+            return []
+
+    def _build_extra(self):
+        """User agent string components based on legacy "extra" settings.
+
+        Creates components from the session-level and client-level
+        ``user_agent_extra`` setting, if present. Both are passed through
+        verbatim and should be appended at the end of the string.
+
+        The preferred way to inject application-specific information into
+        botocore's User-Agent header string is the ``user_agent_appid`` field
+        in :py:class:`botocore.config.Config`. The ``AWS_SDK_UA_APP_ID``
+        environment variable and the ``sdk_ua_app_id`` configuration file
+        setting are alternative ways to set the ``user_agent_appid`` config.
+        """
+        extra = []
+        if self._session_user_agent_extra:
+            extra.append(
+                RawStringUserAgentComponent(self._session_user_agent_extra)
+            )
+        if self._client_config and self._client_config.user_agent_extra:
+            extra.append(
+                RawStringUserAgentComponent(
+                    self._client_config.user_agent_extra
+                )
+            )
+        return extra
+
+    def _build_legacy_ua_string(self, config_ua_override):
+        components = [config_ua_override]
+        if self._session_user_agent_extra:
+            components.append(self._session_user_agent_extra)
+        if self._client_config.user_agent_extra:
+            components.append(self._client_config.user_agent_extra)
+        return ' '.join(components)
+
+    def rebuild_and_replace_user_agent_handler(
+        self, operation_name, request, **kwargs
+    ):
+        ua_string = self.to_string()
+        if request.headers.get('User-Agent'):
+            request.headers.replace_header('User-Agent', ua_string)
+
+
+def _get_crt_version():
+    """
+    This function is considered private and is subject to abrupt breaking
+    changes.
+    """
+    try:
+        import awscrt
+
+        return awscrt.__version__
+    except AttributeError:
+        return None
diff --git a/py311/lib/python3.11/site-packages/botocore/utils.py b/py311/lib/python3.11/site-packages/botocore/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..79f93370427e922a1bb898b95a6d6ee0bdb3e224
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/utils.py
@@ -0,0 +1,3730 @@
+# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
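+#
+# A minimal illustrative sketch of the ``UserAgentString`` builder defined in
+# useragent.py above (not part of either module; the session name, version,
+# and appid values are assumptions for the example):
+#
+#     from botocore.config import Config
+#     from botocore.useragent import UserAgentString
+#
+#     ua_string = (
+#         UserAgentString.from_environment()
+#         .set_session_config(
+#             session_user_agent_name='Botocore',
+#             session_user_agent_version='1.0.0',
+#             session_user_agent_extra='',
+#         )
+#         .with_client_config(Config(user_agent_appid='my-app'))
+#         .to_string()
+#     )
+#     # Yields something like:
+#     # "Botocore/1.0.0 md/Botocore#<installed version> ua/2.1 os/linux#6.1
+#     #  md/arch#x86_64 lang/python#3.11.9 md/pyimpl#CPython app/my-app"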
+import base64 +import binascii +import datetime +import email.message +import functools +import hashlib +import io +import logging +import os +import random +import re +import socket +import tempfile +import time +import warnings +import weakref +from datetime import datetime as _DatetimeClass +from ipaddress import ip_address +from pathlib import Path +from urllib.request import getproxies, proxy_bypass + +import dateutil.parser +from dateutil.tz import tzutc +from urllib3.exceptions import LocationParseError + +import botocore +import botocore.awsrequest +import botocore.httpsession + +# IP Regexes retained for backwards compatibility +from botocore.compat import ( + HAS_CRT, + HEX_PAT, # noqa: F401 + IPV4_PAT, # noqa: F401 + IPV4_RE, + IPV6_ADDRZ_PAT, # noqa: F401 + IPV6_ADDRZ_RE, + IPV6_PAT, # noqa: F401 + LS32_PAT, # noqa: F401 + MD5_AVAILABLE, + UNRESERVED_PAT, # noqa: F401 + UNSAFE_URL_CHARS, + ZONE_ID_PAT, # noqa: F401 + OrderedDict, + get_current_datetime, + get_md5, + get_tzinfo_options, + json, + quote, + urlparse, + urlsplit, + urlunsplit, + zip_longest, +) +from botocore.exceptions import ( + ClientError, + ConfigNotFound, + ConnectionClosedError, + ConnectTimeoutError, + EndpointConnectionError, + HTTPClientError, + InvalidDNSNameError, + InvalidEndpointConfigurationError, + InvalidExpressionError, + InvalidHostLabelError, + InvalidIMDSEndpointError, + InvalidIMDSEndpointModeError, + InvalidRegionError, + MetadataRetrievalError, + MissingDependencyException, + ReadTimeoutError, + SSOTokenLoadError, + UnsupportedOutpostResourceError, + UnsupportedS3AccesspointConfigurationError, + UnsupportedS3ArnError, + UnsupportedS3ConfigurationError, + UnsupportedS3ControlArnError, + UnsupportedS3ControlConfigurationError, +) +from botocore.plugin import ( + PluginContext, + reset_plugin_context, + set_plugin_context, +) + +logger = logging.getLogger(__name__) +DEFAULT_METADATA_SERVICE_TIMEOUT = 1 +METADATA_BASE_URL = 'http://169.254.169.254/' +METADATA_BASE_URL_IPv6 = 'http://[fd00:ec2::254]/' +METADATA_ENDPOINT_MODES = ('ipv4', 'ipv6') + +# These are chars that do not need to be urlencoded. +# Based on rfc2986, section 2.3 +SAFE_CHARS = '-._~' +LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]') +RETRYABLE_HTTP_ERRORS = ( + ReadTimeoutError, + EndpointConnectionError, + ConnectionClosedError, + ConnectTimeoutError, +) +S3_ACCELERATE_WHITELIST = ['dualstack'] +# In switching events from using service name / endpoint prefix to service +# id, we have to preserve compatibility. This maps the instances where either +# is different than the transformed service id. 
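+#
+# Illustrative example of the aliasing (an assumption about usage, not from
+# the original comment): a handler registered under a legacy name is re-homed
+# onto the service-id based event, e.g.
+#
+#     session.register('before-call.monitoring.PutMetricData', handler)
+#     # behaves like:
+#     session.register('before-call.cloudwatch.PutMetricData', handler)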
+EVENT_ALIASES = { + "api.mediatailor": "mediatailor", + "api.pricing": "pricing", + "api.sagemaker": "sagemaker", + "apigateway": "api-gateway", + "application-autoscaling": "application-auto-scaling", + "appstream2": "appstream", + "autoscaling": "auto-scaling", + "autoscaling-plans": "auto-scaling-plans", + "ce": "cost-explorer", + "cloudhsmv2": "cloudhsm-v2", + "cloudsearchdomain": "cloudsearch-domain", + "cognito-idp": "cognito-identity-provider", + "config": "config-service", + "cur": "cost-and-usage-report-service", + "data.iot": "iot-data-plane", + "data.jobs.iot": "iot-jobs-data-plane", + "data.mediastore": "mediastore-data", + "datapipeline": "data-pipeline", + "devicefarm": "device-farm", + "directconnect": "direct-connect", + "discovery": "application-discovery-service", + "dms": "database-migration-service", + "ds": "directory-service", + "dynamodbstreams": "dynamodb-streams", + "elasticbeanstalk": "elastic-beanstalk", + "elasticfilesystem": "efs", + "elasticloadbalancing": "elastic-load-balancing", + "elasticmapreduce": "emr", + "elb": "elastic-load-balancing", + "elbv2": "elastic-load-balancing-v2", + "email": "ses", + "entitlement.marketplace": "marketplace-entitlement-service", + "es": "elasticsearch-service", + "events": "eventbridge", + "cloudwatch-events": "eventbridge", + "iot-data": "iot-data-plane", + "iot-jobs-data": "iot-jobs-data-plane", + "kinesisanalytics": "kinesis-analytics", + "kinesisvideo": "kinesis-video", + "lex-models": "lex-model-building-service", + "lex-runtime": "lex-runtime-service", + "logs": "cloudwatch-logs", + "machinelearning": "machine-learning", + "marketplace-entitlement": "marketplace-entitlement-service", + "marketplacecommerceanalytics": "marketplace-commerce-analytics", + "metering.marketplace": "marketplace-metering", + "meteringmarketplace": "marketplace-metering", + "mgh": "migration-hub", + "models.lex": "lex-model-building-service", + "monitoring": "cloudwatch", + "mturk-requester": "mturk", + "resourcegroupstaggingapi": "resource-groups-tagging-api", + "route53": "route-53", + "route53domains": "route-53-domains", + "runtime.lex": "lex-runtime-service", + "runtime.sagemaker": "sagemaker-runtime", + "sdb": "simpledb", + "secretsmanager": "secrets-manager", + "serverlessrepo": "serverlessapplicationrepository", + "servicecatalog": "service-catalog", + "states": "sfn", + "stepfunctions": "sfn", + "storagegateway": "storage-gateway", + "streams.dynamodb": "dynamodb-streams", + "tagging": "resource-groups-tagging-api", +} + + +# This pattern can be used to detect if a header is a flexible checksum header +CHECKSUM_HEADER_PATTERN = re.compile( + r'^X-Amz-Checksum-([a-z0-9]*)$', + flags=re.IGNORECASE, +) + +PRIORITY_ORDERED_SUPPORTED_PROTOCOLS = ( + 'json', + 'rest-json', + 'rest-xml', + 'smithy-rpc-v2-cbor', + 'query', + 'ec2', +) + + +def ensure_boolean(val): + """Ensures a boolean value if a string or boolean is provided + + For strings, the value for True/False is case insensitive + """ + if isinstance(val, bool): + return val + elif isinstance(val, str): + return val.lower() == 'true' + else: + return False + + +def resolve_imds_endpoint_mode(session): + """Resolving IMDS endpoint mode to either IPv6 or IPv4. + + ec2_metadata_service_endpoint_mode takes precedence over imds_use_ipv6. 
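
    For example (illustrative): with ``ec2_metadata_service_endpoint_mode``
    set to ``ipv6``, this returns ``'ipv6'`` even if ``imds_use_ipv6`` is
    unset or false.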
+ """ + endpoint_mode = session.get_config_variable( + 'ec2_metadata_service_endpoint_mode' + ) + if endpoint_mode is not None: + lendpoint_mode = endpoint_mode.lower() + if lendpoint_mode not in METADATA_ENDPOINT_MODES: + error_msg_kwargs = { + 'mode': endpoint_mode, + 'valid_modes': METADATA_ENDPOINT_MODES, + } + raise InvalidIMDSEndpointModeError(**error_msg_kwargs) + return lendpoint_mode + elif session.get_config_variable('imds_use_ipv6'): + return 'ipv6' + return 'ipv4' + + +def is_json_value_header(shape): + """Determines if the provided shape is the special header type jsonvalue. + + :type shape: botocore.shape + :param shape: Shape to be inspected for the jsonvalue trait. + + :return: True if this type is a jsonvalue, False otherwise + :rtype: Bool + """ + return ( + hasattr(shape, 'serialization') + and shape.serialization.get('jsonvalue', False) + and shape.serialization.get('location') == 'header' + and shape.type_name == 'string' + ) + + +def has_header(header_name, headers): + """Case-insensitive check for header key.""" + if header_name is None: + return False + elif isinstance(headers, botocore.awsrequest.HeadersDict): + return header_name in headers + else: + return header_name.lower() in [key.lower() for key in headers.keys()] + + +def get_service_module_name(service_model): + """Returns the module name for a service + + This is the value used in both the documentation and client class name + """ + name = service_model.metadata.get( + 'serviceAbbreviation', + service_model.metadata.get( + 'serviceFullName', service_model.service_name + ), + ) + name = name.replace('Amazon', '') + name = name.replace('AWS', '') + name = re.sub(r'\W+', '', name) + return name + + +def normalize_url_path(path): + if not path: + return '/' + return remove_dot_segments(path) + + +def normalize_boolean(val): + """Returns None if val is None, otherwise ensure value + converted to boolean""" + if val is None: + return val + else: + return ensure_boolean(val) + + +def remove_dot_segments(url): + # RFC 3986, section 5.2.4 "Remove Dot Segments" + # Also, AWS services require consecutive slashes to be removed, + # so that's done here as well + if not url: + return '' + input_url = url.split('/') + output_list = [] + for x in input_url: + if x and x != '.': + if x == '..': + if output_list: + output_list.pop() + else: + output_list.append(x) + + if url[0] == '/': + first = '/' + else: + first = '' + if url[-1] == '/' and output_list: + last = '/' + else: + last = '' + return first + '/'.join(output_list) + last + + +def validate_jmespath_for_set(expression): + # Validates a limited jmespath expression to determine if we can set a + # value based on it. Only works with dotted paths. + if not expression or expression == '.': + raise InvalidExpressionError(expression=expression) + + for invalid in ['[', ']', '*']: + if invalid in expression: + raise InvalidExpressionError(expression=expression) + + +def set_value_from_jmespath(source, expression, value, is_first=True): + # This takes a (limited) jmespath-like expression & can set a value based + # on it. + # Limitations: + # * Only handles dotted lookups + # * No offsets/wildcards/slices/etc. 
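+    #
+    # Illustrative example (an assumption, not from the original comments):
+    #
+    #     d = {}
+    #     set_value_from_jmespath(d, 'a.b.c', 1)
+    #     assert d == {'a': {'b': {'c': 1}}}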
+    if is_first:
+        validate_jmespath_for_set(expression)
+
+    bits = expression.split('.', 1)
+    current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
+
+    if not current_key:
+        raise InvalidExpressionError(expression=expression)
+
+    if remainder:
+        if current_key not in source:
+            # We've got something in the expression that's not present in the
+            # source (new key). If there are any more bits, we'll set the key
+            # with an empty dictionary.
+            source[current_key] = {}
+
+        return set_value_from_jmespath(
+            source[current_key], remainder, value, is_first=False
+        )
+
+    # If we're down to a single key, set it.
+    source[current_key] = value
+
+
+def is_global_accesspoint(context):
+    """Determine if request is intended for an MRAP accesspoint."""
+    s3_accesspoint = context.get('s3_accesspoint', {})
+    is_global = s3_accesspoint.get('region') == ''
+    return is_global
+
+
+def create_nested_client(session, service_name, **kwargs):
+    # If a client is created from within a plugin loaded via the environment
+    # variable, an infinite loop could arise. Any clients created from within
+    # another client must use this method to prevent infinite loops.
+    ctx = PluginContext(plugins="DISABLED")
+    token = set_plugin_context(ctx)
+    try:
+        return session.create_client(service_name, **kwargs)
+    finally:
+        reset_plugin_context(token)
+
+
+class _RetriesExceededError(Exception):
+    """Internal exception used when the number of retries is exceeded."""
+
+    pass
+
+
+class BadIMDSRequestError(Exception):
+    def __init__(self, request):
+        self.request = request
+
+
+class IMDSFetcher:
+    _RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError
+    _TOKEN_PATH = 'latest/api/token'
+    _TOKEN_TTL = '21600'
+
+    def __init__(
+        self,
+        timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
+        num_attempts=1,
+        base_url=METADATA_BASE_URL,
+        env=None,
+        user_agent=None,
+        config=None,
+    ):
+        self._timeout = timeout
+        self._num_attempts = num_attempts
+        if config is None:
+            config = {}
+        self._base_url = self._select_base_url(base_url, config)
+        self._config = config
+
+        if env is None:
+            env = os.environ.copy()
+        self._disabled = (
+            env.get('AWS_EC2_METADATA_DISABLED', 'false').lower() == 'true'
+        )
+        self._imds_v1_disabled = config.get('ec2_metadata_v1_disabled')
+        self._user_agent = user_agent
+        self._session = botocore.httpsession.URLLib3Session(
+            timeout=self._timeout,
+            proxies=get_environ_proxies(self._base_url),
+        )
+
+    def get_base_url(self):
+        return self._base_url
+
+    def _select_base_url(self, base_url, config):
+        if config is None:
+            config = {}
+
+        requires_ipv6 = (
+            config.get('ec2_metadata_service_endpoint_mode') == 'ipv6'
+        )
+        custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint')
+
+        if requires_ipv6 and custom_metadata_endpoint:
+            logger.warning(
+                "Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint."
+            )
+
+        chosen_base_url = None
+
+        if base_url != METADATA_BASE_URL:
+            chosen_base_url = base_url
+        elif custom_metadata_endpoint:
+            chosen_base_url = custom_metadata_endpoint
+        elif requires_ipv6:
+            chosen_base_url = METADATA_BASE_URL_IPv6
+        else:
+            chosen_base_url = METADATA_BASE_URL
+
+        logger.debug("IMDS ENDPOINT: %s", chosen_base_url)
+        if not is_valid_uri(chosen_base_url):
+            raise InvalidIMDSEndpointError(endpoint=chosen_base_url)
+
+        return chosen_base_url
+
+    def _construct_url(self, path):
+        sep = ''
+        if self._base_url and not self._base_url.endswith('/'):
+            sep = '/'
+        return f'{self._base_url}{sep}{path}'
+
+    def _fetch_metadata_token(self):
+        self._assert_enabled()
+        url = self._construct_url(self._TOKEN_PATH)
+        headers = {
+            'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
+        }
+        self._add_user_agent(headers)
+        request = botocore.awsrequest.AWSRequest(
+            method='PUT', url=url, headers=headers
+        )
+        for i in range(self._num_attempts):
+            try:
+                response = self._session.send(request.prepare())
+                if response.status_code == 200:
+                    return response.text
+                elif response.status_code in (404, 403, 405):
+                    return None
+                elif response.status_code in (400,):
+                    raise BadIMDSRequestError(request)
+            except ReadTimeoutError:
+                return None
+            except RETRYABLE_HTTP_ERRORS as e:
+                logger.debug(
+                    "Caught retryable HTTP exception while making metadata "
+                    "service request to %s: %s",
+                    url,
+                    e,
+                    exc_info=True,
+                )
+            except HTTPClientError as e:
+                if isinstance(e.kwargs.get('error'), LocationParseError):
+                    raise InvalidIMDSEndpointError(endpoint=url, error=e)
+                else:
+                    raise
+        return None
+
+    def _get_request(self, url_path, retry_func, token=None):
+        """Make a GET request to the Instance Metadata Service.
+
+        :type url_path: str
+        :param url_path: The path component of the URL to make a GET request
+            to. This arg is appended to the base_url that was provided in the
+            initializer.
+
+        :type retry_func: callable
+        :param retry_func: A function that takes the response as an argument
+            and determines if it needs to retry. By default empty and
+            non-200 responses are retried.
+
+        :type token: str
+        :param token: Metadata token to send along with GET requests to IMDS.
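+
+        If every attempt fails, ``self._RETRIES_EXCEEDED_ERROR_CLS`` is
+        raised.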
+        """
+        self._assert_enabled()
+        if not token:
+            self._assert_v1_enabled()
+        if retry_func is None:
+            retry_func = self._default_retry
+        url = self._construct_url(url_path)
+        headers = {}
+        if token is not None:
+            headers['x-aws-ec2-metadata-token'] = token
+        self._add_user_agent(headers)
+        for i in range(self._num_attempts):
+            try:
+                request = botocore.awsrequest.AWSRequest(
+                    method='GET', url=url, headers=headers
+                )
+                response = self._session.send(request.prepare())
+                if not retry_func(response):
+                    return response
+            except RETRYABLE_HTTP_ERRORS as e:
+                logger.debug(
+                    "Caught retryable HTTP exception while making metadata "
+                    "service request to %s: %s",
+                    url,
+                    e,
+                    exc_info=True,
+                )
+        raise self._RETRIES_EXCEEDED_ERROR_CLS()
+
+    def _add_user_agent(self, headers):
+        if self._user_agent is not None:
+            headers['User-Agent'] = self._user_agent
+
+    def _assert_enabled(self):
+        if self._disabled:
+            logger.debug("Access to EC2 metadata has been disabled.")
+            raise self._RETRIES_EXCEEDED_ERROR_CLS()
+
+    def _assert_v1_enabled(self):
+        if self._imds_v1_disabled:
+            raise MetadataRetrievalError(
+                error_msg="Unable to retrieve token for use in IMDSv2 call and IMDSv1 has been disabled"
+            )
+
+    def _default_retry(self, response):
+        return self._is_non_ok_response(response) or self._is_empty(response)
+
+    def _is_non_ok_response(self, response):
+        if response.status_code != 200:
+            self._log_imds_response(response, 'non-200', log_body=True)
+            return True
+        return False
+
+    def _is_empty(self, response):
+        if not response.content:
+            self._log_imds_response(response, 'no body', log_body=True)
+            return True
+        return False
+
+    def _log_imds_response(self, response, reason_to_log, log_body=False):
+        statement = (
+            "Metadata service returned %s response "
+            "with status code of %s for url: %s"
+        )
+        logger_args = [reason_to_log, response.status_code, response.url]
+        if log_body:
+            statement += ", content body: %s"
+            logger_args.append(response.content)
+        logger.debug(statement, *logger_args)
+
+
+class InstanceMetadataFetcher(IMDSFetcher):
+    _URL_PATH = 'latest/meta-data/iam/security-credentials/'
+    _REQUIRED_CREDENTIAL_FIELDS = [
+        'AccessKeyId',
+        'SecretAccessKey',
+        'Token',
+        'Expiration',
+    ]
+
+    def retrieve_iam_role_credentials(self):
+        try:
+            token = self._fetch_metadata_token()
+            role_name = self._get_iam_role(token)
+            credentials = self._get_credentials(role_name, token)
+            if self._contains_all_credential_fields(credentials):
+                credentials = {
+                    'role_name': role_name,
+                    'access_key': credentials['AccessKeyId'],
+                    'secret_key': credentials['SecretAccessKey'],
+                    'token': credentials['Token'],
+                    'expiry_time': credentials['Expiration'],
+                }
+                self._evaluate_expiration(credentials)
+                return credentials
+            else:
+                # IMDS can return a 200 response that has a JSON formatted
+                # error message (i.e. if ec2 is not a trusted entity for the
+                # attached role). We do not necessarily want to retry for
+                # these and we also do not necessarily want to raise a key
+                # error. So at least log the problematic response and return
+                # an empty dictionary to signal that it was not able to
+                # retrieve credentials. These errors will contain both a
+                # Code and Message key.
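+                # An illustrative (not exhaustive) example of such a body:
+                #     {"Code": "AssumeRoleUnauthorizedAccess",
+                #      "Message": "EC2 cannot assume the role ..."}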
+                if 'Code' in credentials and 'Message' in credentials:
+                    logger.debug(
+                        'Error response received when retrieving '
+                        'credentials: %s.',
+                        credentials,
+                    )
+                return {}
+        except self._RETRIES_EXCEEDED_ERROR_CLS:
+            logger.debug(
+                "Max number of attempts exceeded (%s) when "
+                "attempting to retrieve data from metadata service.",
+                self._num_attempts,
+            )
+        except BadIMDSRequestError as e:
+            logger.debug("Bad IMDS request: %s", e.request)
+        return {}
+
+    def _get_iam_role(self, token=None):
+        return self._get_request(
+            url_path=self._URL_PATH,
+            retry_func=self._needs_retry_for_role_name,
+            token=token,
+        ).text
+
+    def _get_credentials(self, role_name, token=None):
+        r = self._get_request(
+            url_path=self._URL_PATH + role_name,
+            retry_func=self._needs_retry_for_credentials,
+            token=token,
+        )
+        return json.loads(r.text)
+
+    def _is_invalid_json(self, response):
+        try:
+            json.loads(response.text)
+            return False
+        except ValueError:
+            self._log_imds_response(response, 'invalid json')
+            return True
+
+    def _needs_retry_for_role_name(self, response):
+        return self._is_non_ok_response(response) or self._is_empty(response)
+
+    def _needs_retry_for_credentials(self, response):
+        return (
+            self._is_non_ok_response(response)
+            or self._is_empty(response)
+            or self._is_invalid_json(response)
+        )
+
+    def _contains_all_credential_fields(self, credentials):
+        for field in self._REQUIRED_CREDENTIAL_FIELDS:
+            if field not in credentials:
+                logger.debug(
+                    'Retrieved credentials are missing required field: %s',
+                    field,
+                )
+                return False
+        return True
+
+    def _evaluate_expiration(self, credentials):
+        expiration = credentials.get("expiry_time")
+        if expiration is None:
+            return
+        try:
+            expiration = datetime.datetime.strptime(
+                expiration, "%Y-%m-%dT%H:%M:%SZ"
+            )
+            refresh_interval = self._config.get(
+                "ec2_credential_refresh_window", 60 * 10
+            )
+            jitter = random.randint(120, 600)  # Between 2 and 10 minutes
+            refresh_interval_with_jitter = refresh_interval + jitter
+            current_time = get_current_datetime()
+            refresh_offset = datetime.timedelta(
+                seconds=refresh_interval_with_jitter
+            )
+            extension_time = expiration - refresh_offset
+            if current_time >= extension_time:
+                new_time = current_time + refresh_offset
+                credentials["expiry_time"] = new_time.strftime(
+                    "%Y-%m-%dT%H:%M:%SZ"
+                )
+                logger.info(
+                    "Attempting credential expiration extension due to a "
+                    "credential service availability issue. A refresh of "
+                    "these credentials will be attempted again within "
+                    "the next %.0f minutes.",
+                    refresh_interval_with_jitter / 60,
+                )
+        except ValueError:
+            logger.debug(
+                "Unable to parse expiry_time in %s", credentials['expiry_time']
+            )
+
+
+class IMDSRegionProvider:
+    def __init__(self, session, environ=None, fetcher=None):
+        """Initialize IMDSRegionProvider.
+        :type session: :class:`botocore.session.Session`
+        :param session: The session is needed to look up configuration for
+            how to contact the instance metadata service. Specifically,
+            whether or not it should use the IMDS region at all, and if so how
+            to configure the timeout and number of attempts to reach the
+            service.
+        :type environ: None or dict
+        :param environ: A dictionary of environment variables to use. If
+            ``None`` is the argument then ``os.environ`` will be used by
+            default.
+        :type fetcher: :class:`botocore.utils.InstanceMetadataRegionFetcher`
+        :param fetcher: The class to actually handle the fetching of the region
+            from the IMDS. If not provided a default one will be created.
+        """
+        self._session = session
+        if environ is None:
+            environ = os.environ
+        self._environ = environ
+        self._fetcher = fetcher
+
+    def provide(self):
+        """Provide the region value from IMDS."""
+        instance_region = self._get_instance_metadata_region()
+        return instance_region
+
+    def _get_instance_metadata_region(self):
+        fetcher = self._get_fetcher()
+        region = fetcher.retrieve_region()
+        return region
+
+    def _get_fetcher(self):
+        if self._fetcher is None:
+            self._fetcher = self._create_fetcher()
+        return self._fetcher
+
+    def _create_fetcher(self):
+        metadata_timeout = self._session.get_config_variable(
+            'metadata_service_timeout'
+        )
+        metadata_num_attempts = self._session.get_config_variable(
+            'metadata_service_num_attempts'
+        )
+        imds_config = {
+            'ec2_metadata_service_endpoint': self._session.get_config_variable(
+                'ec2_metadata_service_endpoint'
+            ),
+            'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode(
+                self._session
+            ),
+            'ec2_metadata_v1_disabled': self._session.get_config_variable(
+                'ec2_metadata_v1_disabled'
+            ),
+        }
+        fetcher = InstanceMetadataRegionFetcher(
+            timeout=metadata_timeout,
+            num_attempts=metadata_num_attempts,
+            env=self._environ,
+            user_agent=self._session.user_agent(),
+            config=imds_config,
+        )
+        return fetcher
+
+
+class InstanceMetadataRegionFetcher(IMDSFetcher):
+    _URL_PATH = 'latest/meta-data/placement/availability-zone/'
+
+    def retrieve_region(self):
+        """Get the current region from the instance metadata service.
+
+        :rtype: str or None
+        :returns: The region the current instance is running in, or ``None``
+            if the instance metadata service cannot be contacted, does not
+            give a valid response, or exhausts its retries.
+        """
+        try:
+            region = self._get_region()
+            return region
+        except self._RETRIES_EXCEEDED_ERROR_CLS:
+            logger.debug(
+                "Max number of attempts exceeded (%s) when "
+                "attempting to retrieve data from metadata service.",
+                self._num_attempts,
+            )
+        return None
+
+    def _get_region(self):
+        token = self._fetch_metadata_token()
+        response = self._get_request(
+            url_path=self._URL_PATH,
+            retry_func=self._default_retry,
+            token=token,
+        )
+        availability_zone = response.text
+        region = availability_zone[:-1]
+        return region
+
+
+def merge_dicts(dict1, dict2, append_lists=False):
+    """Given two dicts, merge the second dict into the first.
+
+    The dicts can have arbitrary nesting.
+
+    :param append_lists: If True, instead of clobbering a list with the new
+        value, append all of the new values onto the original list.
+    """
+    for key in dict2:
+        if isinstance(dict2[key], dict):
+            if key in dict1 and key in dict2:
+                merge_dicts(dict1[key], dict2[key])
+            else:
+                dict1[key] = dict2[key]
+        # If the value is a list and the ``append_lists`` flag is set,
+        # append the new values onto the original list
+        elif isinstance(dict2[key], list) and append_lists:
+            # The value in dict1 must be a list in order to append new
+            # values onto it.
+            if key in dict1 and isinstance(dict1[key], list):
+                dict1[key].extend(dict2[key])
+            else:
+                dict1[key] = dict2[key]
+        else:
+            # For scalar types, we simply overwrite the value in the
+            # first dict with the value from the second.
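+            # Overall (illustrative): merge_dicts({'a': {'x': 1}},
+            # {'a': {'y': 2}}) leaves dict1 as {'a': {'x': 1, 'y': 2}}.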
+            dict1[key] = dict2[key]
+
+
+def lowercase_dict(original):
+    """Copies the given dictionary ensuring all keys are lowercase strings."""
+    copy = {}
+    for key in original:
+        copy[key.lower()] = original[key]
+    return copy
+
+
+def parse_key_val_file(filename, _open=open):
+    try:
+        with _open(filename) as f:
+            contents = f.read()
+            return parse_key_val_file_contents(contents)
+    except OSError:
+        raise ConfigNotFound(path=filename)
+
+
+def parse_key_val_file_contents(contents):
+    # This was originally extracted from the EC2 credential provider, which was
+    # fairly lenient in its parsing. We only try to parse key/val pairs if
+    # there's a '=' in the line.
+    final = {}
+    for line in contents.splitlines():
+        if '=' not in line:
+            continue
+        key, val = line.split('=', 1)
+        key = key.strip()
+        val = val.strip()
+        final[key] = val
+    return final
+
+
+def percent_encode_sequence(mapping, safe=SAFE_CHARS):
+    """Urlencode a dict or list into a string.
+
+    This is similar to urllib.urlencode except that:
+
+    * It uses quote, and not quote_plus
+    * It has a default list of safe chars that don't need
+      to be encoded, which matches what AWS services expect.
+
+    If any value in the input ``mapping`` is a list type,
+    then each list element will be serialized. This is the equivalent
+    of ``urlencode``'s ``doseq=True`` argument.
+
+    This function should be preferred over the stdlib
+    ``urlencode()`` function.
+
+    :param mapping: Either a dict to urlencode or a list of
+        ``(key, value)`` pairs.
+
+    """
+    encoded_pairs = []
+    if hasattr(mapping, 'items'):
+        pairs = mapping.items()
+    else:
+        pairs = mapping
+    for key, value in pairs:
+        if isinstance(value, list):
+            for element in value:
+                encoded_pairs.append(
+                    f'{percent_encode(key)}={percent_encode(element)}'
+                )
+        else:
+            encoded_pairs.append(
+                f'{percent_encode(key)}={percent_encode(value)}'
+            )
+    return '&'.join(encoded_pairs)
+
+
+def percent_encode(input_str, safe=SAFE_CHARS):
+    """Urlencodes a string.
+
+    Whereas percent_encode_sequence handles taking a dict/sequence and
+    producing a percent encoded string, this function deals only with
+    taking a string (not a dict/sequence) and percent encoding it.
+
+    If given the binary type, will simply URL encode it. If given the
+    text type, will produce the binary type by UTF-8 encoding the
+    text. If given something else, will convert it to the text type
+    first.
+    """
+    # If it's not a binary or text string, make it a text string.
+    if not isinstance(input_str, (bytes, str)):
+        input_str = str(input_str)
+    # If it's not bytes, make it bytes by UTF-8 encoding it.
+    if not isinstance(input_str, bytes):
+        input_str = input_str.encode('utf-8')
+    return quote(input_str, safe=safe)
+
+
+def _epoch_seconds_to_datetime(value, tzinfo):
+    """Parse numerical epoch timestamps (seconds since 1970) into a
+    ``datetime.datetime`` in UTC using ``datetime.timedelta``. This is intended
+    as a fallback when ``fromtimestamp`` raises ``OverflowError`` or ``OSError``.
+
+    :type value: float or int
+    :param value: The Unix timestamp as a number.
+
+    :type tzinfo: callable
+    :param tzinfo: A ``datetime.tzinfo`` class or compatible callable.
+    """
+    epoch_zero = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
+    epoch_zero_localized = epoch_zero.astimezone(tzinfo())
+    return epoch_zero_localized + datetime.timedelta(seconds=value)
+
+
+def _parse_timestamp_with_tzinfo(value, tzinfo):
+    """Parse timestamp with pluggable tzinfo options."""
+    if isinstance(value, (int, float)):
+        # Possibly an epoch time.
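+        # e.g. (illustrative) a value of 0 with tzinfo=tzutc yields
+        # datetime.datetime(1970, 1, 1, tzinfo=tzutc()).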
+        return datetime.datetime.fromtimestamp(value, tzinfo())
+    else:
+        try:
+            return datetime.datetime.fromtimestamp(float(value), tzinfo())
+        except (TypeError, ValueError):
+            pass
+        try:
+            # In certain cases, a timestamp marked with GMT can be parsed into a
+            # different time zone, so here we provide a context which will
+            # enforce that GMT == UTC.
+            return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
+        except (TypeError, ValueError) as e:
+            raise ValueError(f'Invalid timestamp "{value}": {e}')
+
+
+def parse_timestamp(value):
+    """Parse a timestamp into a datetime object.
+
+    Supported formats:
+
+        * iso8601
+        * rfc822
+        * epoch (value is an integer)
+
+    This will return a ``datetime.datetime`` object.
+
+    """
+    tzinfo_options = get_tzinfo_options()
+    for tzinfo in tzinfo_options:
+        try:
+            return _parse_timestamp_with_tzinfo(value, tzinfo)
+        except (OSError, OverflowError) as e:
+            logger.debug(
+                'Unable to parse timestamp with "%s" timezone info.',
+                tzinfo.__name__,
+                exc_info=e,
+            )
+    # For numeric values, fall back to the fromtimestamp-free method.
+    # From Python's ``datetime.datetime.fromtimestamp`` documentation: "This
+    # may raise ``OverflowError``, if the timestamp is out of the range of
+    # values supported by the platform C localtime() function, and ``OSError``
+    # on localtime() failure. It's common for this to be restricted to years
+    # from 1970 through 2038."
+    try:
+        numeric_value = float(value)
+    except (TypeError, ValueError):
+        pass
+    else:
+        try:
+            for tzinfo in tzinfo_options:
+                return _epoch_seconds_to_datetime(numeric_value, tzinfo=tzinfo)
+        except (OSError, OverflowError) as e:
+            logger.debug(
+                'Unable to parse timestamp using fallback method with "%s" '
+                'timezone info.',
+                tzinfo.__name__,
+                exc_info=e,
+            )
+    raise RuntimeError(
+        f'Unable to calculate correct timezone offset for "{value}"'
+    )
+
+
+def parse_to_aware_datetime(value):
+    """Convert the passed in value to a datetime object with tzinfo.
+
+    This function can be used to normalize all timestamp inputs. This
+    function accepts a number of different types of inputs, but
+    will always return a datetime.datetime object with time zone
+    information.
+
+    The input param ``value`` can be one of several types:
+
+        * A datetime object (both naive and aware)
+        * An integer representing the epoch time (can also be a string
+          of the integer, i.e '0', instead of 0). The epoch time is
+          considered to be UTC.
+        * An iso8601 formatted timestamp. This does not need to be
+          a complete timestamp, it can contain just the date portion
+          without the time component.
+
+    The returned value will be a datetime object that will have tzinfo.
+    If no timezone info was provided in the input value, then UTC is
+    assumed, not local time.
+
+    """
+    # This is a general purpose method that handles several cases of
+    # converting the provided value to a string timestamp suitable to be
+    # serialized to an http request. It can handle:
+    # 1) A datetime.datetime object.
+    if isinstance(value, _DatetimeClass):
+        datetime_obj = value
+    else:
+        # 2) A string object that's formatted as a timestamp.
+        #    We document this as being an iso8601 timestamp, although
+        #    parse_timestamp is a bit more flexible.
+        datetime_obj = parse_timestamp(value)
+    if datetime_obj.tzinfo is None:
+        # A case could be made that if no time zone is provided,
+        # we should use the local time. However, to preserve backwards
+        # compat, the previous behavior was to assume UTC, which is
+        # what we're going to do here.
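+        # e.g. (illustrative) parse_to_aware_datetime('2021-01-01') yields
+        # datetime.datetime(2021, 1, 1, tzinfo=tzutc()).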
+ datetime_obj = datetime_obj.replace(tzinfo=tzutc()) + else: + datetime_obj = datetime_obj.astimezone(tzutc()) + return datetime_obj + + +def datetime2timestamp(dt, default_timezone=None): + """Calculate the timestamp based on the given datetime instance. + + :type dt: datetime + :param dt: A datetime object to be converted into timestamp + :type default_timezone: tzinfo + :param default_timezone: If it is provided as None, we treat it as tzutc(). + But it is only used when dt is a naive datetime. + :returns: The timestamp + """ + epoch = datetime.datetime(1970, 1, 1) + if dt.tzinfo is None: + if default_timezone is None: + default_timezone = tzutc() + dt = dt.replace(tzinfo=default_timezone) + d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch + return d.total_seconds() + + +def calculate_sha256(body, as_hex=False): + """Calculate a sha256 checksum. + + This method will calculate the sha256 checksum of a file like + object. Note that this method will iterate through the entire + file contents. The caller is responsible for ensuring the proper + starting position of the file and ``seek()``'ing the file back + to its starting location if other consumers need to read from + the file like object. + + :param body: Any file like object. The file must be opened + in binary mode such that a ``.read()`` call returns bytes. + :param as_hex: If True, then the hex digest is returned. + If False, then the digest (as binary bytes) is returned. + + :returns: The sha256 checksum + + """ + checksum = hashlib.sha256() + for chunk in iter(lambda: body.read(1024 * 1024), b''): + checksum.update(chunk) + if as_hex: + return checksum.hexdigest() + else: + return checksum.digest() + + +def calculate_tree_hash(body): + """Calculate a tree hash checksum. + + For more information see: + + http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html + + :param body: Any file like object. This has the same constraints as + the ``body`` param in calculate_sha256 + + :rtype: str + :returns: The hex version of the calculated tree hash + + """ + chunks = [] + required_chunk_size = 1024 * 1024 + sha256 = hashlib.sha256 + for chunk in iter(lambda: body.read(required_chunk_size), b''): + chunks.append(sha256(chunk).digest()) + if not chunks: + return sha256(b'').hexdigest() + while len(chunks) > 1: + new_chunks = [] + for first, second in _in_pairs(chunks): + if second is not None: + new_chunks.append(sha256(first + second).digest()) + else: + # We're at the end of the list and there's no pair left. + new_chunks.append(first) + chunks = new_chunks + return binascii.hexlify(chunks[0]).decode('ascii') + + +def _in_pairs(iterable): + # Creates iterator that iterates over the list in pairs: + # for a, b in _in_pairs([0, 1, 2, 3, 4]): + # print(a, b) + # + # will print: + # 0, 1 + # 2, 3 + # 4, None + shared_iter = iter(iterable) + # Note that zip_longest is a compat import that uses + # the itertools izip_longest. This creates an iterator, + # this call below does _not_ immediately create the list + # of pairs. + return zip_longest(shared_iter, shared_iter) + + +class CachedProperty: + """A read only property that caches the initially computed value. + + This descriptor will only call the provided ``fget`` function once. + Subsequent access to this property will return the cached value. 
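+
+    Illustrative usage (hypothetical class)::
+
+        class Dataset:
+            @CachedProperty
+            def size(self):
+                return self._compute_size_expensively()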
+
+    """
+
+    def __init__(self, fget):
+        self._fget = fget
+
+    def __get__(self, obj, cls):
+        if obj is None:
+            return self
+        else:
+            computed_value = self._fget(obj)
+            obj.__dict__[self._fget.__name__] = computed_value
+            return computed_value
+
+
+class ArgumentGenerator:
+    """Generate sample input based on a shape model.
+
+    This class contains a ``generate_skeleton`` method that will take
+    an input/output shape (created from ``botocore.model``) and generate
+    a sample dictionary corresponding to the input/output shape.
+
+    The specific values used are placeholder values. For strings either an
+    empty string or the member name can be used, for numbers 0 or 0.0 is used.
+    The intended usage of this class is to generate the *shape* of the input
+    structure.
+
+    This can be useful for operations that have complex input shapes.
+    This allows a user to just fill in the necessary data instead of
+    worrying about the specific structure of the input arguments.
+
+    Example usage::
+
+        s = botocore.session.get_session()
+        ddb = s.get_service_model('dynamodb')
+        arg_gen = ArgumentGenerator()
+        sample_input = arg_gen.generate_skeleton(
+            ddb.operation_model('CreateTable').input_shape)
+        print("Sample input for dynamodb.CreateTable: %s" % sample_input)
+
+    """
+
+    def __init__(self, use_member_names=False):
+        self._use_member_names = use_member_names
+
+    def generate_skeleton(self, shape):
+        """Generate a sample input.
+
+        :type shape: ``botocore.model.Shape``
+        :param shape: The input shape.
+
+        :return: The generated skeleton input corresponding to the
+            provided input shape.
+
+        """
+        stack = []
+        return self._generate_skeleton(shape, stack)
+
+    def _generate_skeleton(self, shape, stack, name=''):
+        stack.append(shape.name)
+        try:
+            if shape.type_name == 'structure':
+                return self._generate_type_structure(shape, stack)
+            elif shape.type_name == 'list':
+                return self._generate_type_list(shape, stack)
+            elif shape.type_name == 'map':
+                return self._generate_type_map(shape, stack)
+            elif shape.type_name == 'string':
+                if self._use_member_names:
+                    return name
+                if shape.enum:
+                    return random.choice(shape.enum)
+                return ''
+            elif shape.type_name in ['integer', 'long']:
+                return 0
+            elif shape.type_name in ['float', 'double']:
+                return 0.0
+            elif shape.type_name == 'boolean':
+                return True
+            elif shape.type_name == 'timestamp':
+                return datetime.datetime(1970, 1, 1, 0, 0, 0)
+        finally:
+            stack.pop()
+
+    def _generate_type_structure(self, shape, stack):
+        if stack.count(shape.name) > 1:
+            return {}
+        skeleton = OrderedDict()
+        for member_name, member_shape in shape.members.items():
+            skeleton[member_name] = self._generate_skeleton(
+                member_shape, stack, name=member_name
+            )
+        return skeleton
+
+    def _generate_type_list(self, shape, stack):
+        # For list elements we generate a single placeholder
+        # element for the skeleton list.
+        name = ''
+        if self._use_member_names:
+            name = shape.member.name
+        return [
+            self._generate_skeleton(shape.member, stack, name),
+        ]
+
+    def _generate_type_map(self, shape, stack):
+        key_shape = shape.key
+        value_shape = shape.value
+        assert key_shape.type_name == 'string'
+        return OrderedDict(
+            [
+                ('KeyName', self._generate_skeleton(value_shape, stack)),
+            ]
+        )
+
+
+def is_valid_ipv6_endpoint_url(endpoint_url):
+    if UNSAFE_URL_CHARS.intersection(endpoint_url):
+        return False
+    hostname = f'[{urlparse(endpoint_url).hostname}]'
+    return IPV6_ADDRZ_RE.match(hostname) is not None
+
+
+def is_valid_ipv4_endpoint_url(endpoint_url):
+    hostname = urlparse(endpoint_url).hostname
+    return IPV4_RE.match(hostname) is not None
+
+
+def is_valid_endpoint_url(endpoint_url):
+    """Verify the endpoint_url is valid.
+
+    :type endpoint_url: string
+    :param endpoint_url: An endpoint_url. Must have at least a scheme
+        and a hostname.
+
+    :return: True if the endpoint url is valid. False otherwise.
+
+    """
+    # post-bpo-43882 urlsplit() strips unsafe characters from URL, causing
+    # it to pass hostname validation below. Detect them early to fix that.
+    if UNSAFE_URL_CHARS.intersection(endpoint_url):
+        return False
+    parts = urlsplit(endpoint_url)
+    hostname = parts.hostname
+    if hostname is None:
+        return False
+    if len(hostname) > 255:
+        return False
+    if hostname[-1] == ".":
+        hostname = hostname[:-1]
+    allowed = re.compile(
+        r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
+        re.IGNORECASE,
+    )
+    return allowed.match(hostname)
+
+
+def is_valid_uri(endpoint_url):
+    return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(
+        endpoint_url
+    )
+
+
+def validate_region_name(region_name):
+    """Provided region_name must be a valid host label."""
+    if region_name is None:
+        return
+    valid_host_label = re.compile(r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$')
+    valid = valid_host_label.match(region_name)
+    if not valid:
+        raise InvalidRegionError(region_name=region_name)
+
+
+def check_dns_name(bucket_name):
+    """
+    Check to see if the ``bucket_name`` complies with the
+    restricted DNS naming conventions necessary to allow
+    access via virtual-hosting style.
+
+    Even though "." characters are perfectly valid in this DNS
+    naming scheme, we are going to punt on any name containing a
+    "." character because these will cause SSL cert validation
+    problems if we try to use virtual-hosting style addressing.
+    """
+    if '.' in bucket_name:
+        return False
+    n = len(bucket_name)
+    if n < 3 or n > 63:
+        # Wrong length
+        return False
+    match = LABEL_RE.match(bucket_name)
+    if match is None or match.end() != len(bucket_name):
+        return False
+    return True
+
+
+def fix_s3_host(
+    request,
+    signature_version,
+    region_name,
+    default_endpoint_url=None,
+    **kwargs,
+):
+    """
+    This handler looks at S3 requests just before they are signed.
+    If there is a bucket name on the path (true for everything except
+    ListAllBuckets) it checks to see if that bucket name conforms to
+    the DNS naming conventions. If it does, it alters the request to
+    use ``virtual hosting`` style addressing rather than ``path-style``
+    addressing.
+
+    """
+    if request.context.get('use_global_endpoint', False):
+        default_endpoint_url = 's3.amazonaws.com'
+    try:
+        switch_to_virtual_host_style(
+            request, signature_version, default_endpoint_url
+        )
+    except InvalidDNSNameError as e:
+        bucket_name = e.kwargs['bucket_name']
+        logger.debug(
+            'Not changing URI, bucket is not DNS compatible: %s', bucket_name
+        )
+
+
+def switch_to_virtual_host_style(
+    request, signature_version, default_endpoint_url=None, **kwargs
+):
+    """
+    This is a handler to force virtual host style s3 addressing no matter
+    the signature version (which is taken into consideration for the default
+    case). If the bucket is not DNS compatible an InvalidDNSName is thrown.
+
+    :param request: An AWSRequest object that is about to be sent.
+    :param signature_version: The signature version to sign with
+    :param default_endpoint_url: The endpoint to use when switching to a
+        virtual style. If None is supplied, the virtual host will be
+        constructed from the url of the request.
+    """
+    if request.auth_path is not None:
+        # The auth_path has already been applied (this may be a
+        # retried request). We don't need to perform this
+        # customization again.
+        return
+    elif _is_get_bucket_location_request(request):
+        # For the GetBucketLocation response, we should not be using
+        # the virtual host style addressing so we can avoid any sigv4
+        # issues.
+        logger.debug(
+            "Request is GetBucketLocation operation, not checking "
+            "for DNS compatibility."
+        )
+        return
+    parts = urlsplit(request.url)
+    request.auth_path = parts.path
+    path_parts = parts.path.split('/')
+
+    # Retrieve the endpoint we will be prepending the bucket name to.
+    if default_endpoint_url is None:
+        default_endpoint_url = parts.netloc
+
+    if len(path_parts) > 1:
+        bucket_name = path_parts[1]
+        if not bucket_name:
+            # If the bucket name is empty we should not be checking for
+            # dns compatibility.
+            return
+        logger.debug('Checking for DNS compatible bucket for: %s', request.url)
+        if check_dns_name(bucket_name):
+            # If the operation is on a bucket, the auth_path must be
+            # terminated with a '/' character.
+            if len(path_parts) == 2:
+                if request.auth_path[-1] != '/':
+                    request.auth_path += '/'
+            path_parts.remove(bucket_name)
+            # At the very least the path must be a '/', such as with the
+            # CreateBucket operation when DNS style is being used. If this
+            # is not used you will get an empty path which is incorrect.
+            path = '/'.join(path_parts) or '/'
+            global_endpoint = default_endpoint_url
+            host = bucket_name + '.' + global_endpoint
+            new_tuple = (parts.scheme, host, path, parts.query, '')
+            new_uri = urlunsplit(new_tuple)
+            request.url = new_uri
+            logger.debug('URI updated to: %s', new_uri)
+        else:
+            raise InvalidDNSNameError(bucket_name=bucket_name)
+
+
+def _is_get_bucket_location_request(request):
+    return request.url.endswith('?location')
+
+
+def instance_cache(func):
+    """Method decorator for caching method calls to a single instance.
+
+    **This is not a general purpose caching decorator.**
+
+    In order to use this, you *must* provide an ``_instance_cache``
+    attribute on the instance.
+
+    This decorator is used to cache method calls. The cache is scoped
+    to a single instance, so multiple instances will each maintain
+    their own cache. In order to keep things simple, this decorator
+    requires that you provide an ``_instance_cache`` attribute on your
+    instance.
+
+    """
+    func_name = func.__name__
+
+    @functools.wraps(func)
+    def _cache_guard(self, *args, **kwargs):
+        cache_key = (func_name, args)
+        if kwargs:
+            kwarg_items = tuple(sorted(kwargs.items()))
+            cache_key = (func_name, args, kwarg_items)
+        result = self._instance_cache.get(cache_key)
+        if result is not None:
+            return result
+        result = func(self, *args, **kwargs)
+        self._instance_cache[cache_key] = result
+        return result
+
+    return _cache_guard
+
+
+def lru_cache_weakref(*cache_args, **cache_kwargs):
+    """
+    Version of functools.lru_cache that stores a weak reference to ``self``.
+
+    Serves the same purpose as :py:func:`instance_cache` but uses Python's
+    functools implementation which offers ``maxsize`` and ``typed`` properties.
+
+    lru_cache is a global cache even when used on a method. The cache's
+    reference to ``self`` will prevent garbage collection of the object. This
+    wrapper around functools.lru_cache replaces the reference to ``self`` with
+    a weak reference to not interfere with garbage collection.
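+
+    Illustrative usage (hypothetical method)::
+
+        class Resolver:
+            @lru_cache_weakref(maxsize=32)
+            def resolve(self, name):
+                return self._do_lookup(name)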
+    """
+
+    def wrapper(func):
+        @functools.lru_cache(*cache_args, **cache_kwargs)
+        def func_with_weakref(weakref_to_self, *args, **kwargs):
+            return func(weakref_to_self(), *args, **kwargs)
+
+        @functools.wraps(func)
+        def inner(self, *args, **kwargs):
+            for kwarg_key, kwarg_value in kwargs.items():
+                if isinstance(kwarg_value, list):
+                    kwargs[kwarg_key] = tuple(kwarg_value)
+            return func_with_weakref(weakref.ref(self), *args, **kwargs)
+
+        inner.cache_info = func_with_weakref.cache_info
+        return inner
+
+    return wrapper
+
+
+def switch_host_s3_accelerate(request, operation_name, **kwargs):
+    """Switches the current s3 endpoint with an S3 Accelerate endpoint"""
+
+    # Note that when registered the switching of the s3 host happens
+    # before it gets changed to virtual. So we are not concerned with ensuring
+    # that the bucket name is translated to the virtual style here and we
+    # can hard code the Accelerate endpoint.
+    parts = urlsplit(request.url).netloc.split('.')
+    parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
+    endpoint = 'https://s3-accelerate.'
+    if len(parts) > 0:
+        endpoint += '.'.join(parts) + '.'
+    endpoint += 'amazonaws.com'
+
+    if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
+        return
+    _switch_hosts(request, endpoint, use_new_scheme=False)
+
+
+def switch_host_with_param(request, param_name):
+    """Switches the host using a parameter value from a JSON request body"""
+    request_json = json.loads(request.data.decode('utf-8'))
+    if request_json.get(param_name):
+        new_endpoint = request_json[param_name]
+        _switch_hosts(request, new_endpoint)
+
+
+def _switch_hosts(request, new_endpoint, use_new_scheme=True):
+    final_endpoint = _get_new_endpoint(
+        request.url, new_endpoint, use_new_scheme
+    )
+    request.url = final_endpoint
+
+
+def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
+    new_endpoint_components = urlsplit(new_endpoint)
+    original_endpoint_components = urlsplit(original_endpoint)
+    scheme = original_endpoint_components.scheme
+    if use_new_scheme:
+        scheme = new_endpoint_components.scheme
+    final_endpoint_components = (
+        scheme,
+        new_endpoint_components.netloc,
+        original_endpoint_components.path,
+        original_endpoint_components.query,
+        '',
+    )
+    final_endpoint = urlunsplit(final_endpoint_components)
+    logger.debug(
+        'Updating URI from %s to %s', original_endpoint, final_endpoint
+    )
+    return final_endpoint
+
+
+def deep_merge(base, extra):
+    """Deeply merge two dictionaries, overriding existing keys in the base.
+
+    :param base: The base dictionary which will be merged into.
+    :param extra: The dictionary to merge into the base. Keys from this
+        dictionary will take precedence.
+    """
+    for key in extra:
+        # If the key represents a dict on both given dicts, merge the sub-dicts
+        if (
+            key in base
+            and isinstance(base[key], dict)
+            and isinstance(extra[key], dict)
+        ):
+            deep_merge(base[key], extra[key])
+            continue
+
+        # Otherwise, set the key on the base to be the value of the extra.
+        base[key] = extra[key]
+
+
+def hyphenize_service_id(service_id):
+    """Translate the form used for event emitters.
+
+    :param service_id: The service_id to convert.
+    """
+    return service_id.replace(' ', '-').lower()
+
+
+class IdentityCache:
+    """Base IdentityCache implementation for storing and retrieving
+    highly accessed credentials.
+
+    This class is not intended to be instantiated in user code.
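+
+    Subclasses are expected to override ``build_refresh_callback`` (and,
+    typically, ``get_credentials``), as ``S3ExpressIdentityCache`` does below.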
+    """
+
+    METHOD = "base_identity_cache"
+
+    def __init__(self, client, credential_cls):
+        self._client = client
+        self._credential_cls = credential_cls
+
+    def get_credentials(self, **kwargs):
+        callback = self.build_refresh_callback(**kwargs)
+        metadata = callback()
+        credential_entry = self._credential_cls.create_from_metadata(
+            metadata=metadata,
+            refresh_using=callback,
+            method=self.METHOD,
+            advisory_timeout=45,
+            mandatory_timeout=10,
+        )
+        return credential_entry
+
+    def build_refresh_callback(self, **kwargs):
+        """Callback to be implemented by subclasses.
+
+        Returns a set of metadata to be converted into a new
+        credential instance.
+        """
+        raise NotImplementedError()
+
+
+class S3ExpressIdentityCache(IdentityCache):
+    """S3Express IdentityCache for retrieving and storing
+    credentials from CreateSession calls.
+
+    This class is not intended to be instantiated in user code.
+    """
+
+    METHOD = "s3express"
+
+    def __init__(self, client, credential_cls):
+        self._client = client
+        self._credential_cls = credential_cls
+
+    @functools.lru_cache(maxsize=100)
+    def get_credentials(self, bucket):
+        return super().get_credentials(bucket=bucket)
+
+    def build_refresh_callback(self, bucket):
+        def refresher():
+            response = self._client.create_session(Bucket=bucket)
+            creds = response['Credentials']
+            expiration = self._serialize_if_needed(
+                creds['Expiration'], iso=True
+            )
+            return {
+                "access_key": creds['AccessKeyId'],
+                "secret_key": creds['SecretAccessKey'],
+                "token": creds['SessionToken'],
+                "expiry_time": expiration,
+            }
+
+        return refresher
+
+    def _serialize_if_needed(self, value, iso=False):
+        if isinstance(value, _DatetimeClass):
+            if iso:
+                return value.isoformat()
+            return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
+        return value
+
+
+class S3ExpressIdentityResolver:
+    def __init__(self, client, credential_cls, cache=None):
+        self._client = weakref.proxy(client)
+
+        if cache is None:
+            cache = S3ExpressIdentityCache(self._client, credential_cls)
+        self._cache = cache
+
+    def register(self, event_emitter=None):
+        logger.debug('Registering S3Express Identity Resolver')
+        emitter = event_emitter or self._client.meta.events
+        emitter.register('before-call.s3', self.apply_signing_cache_key)
+        emitter.register('before-sign.s3', self.resolve_s3express_identity)
+
+    def apply_signing_cache_key(self, params, context, **kwargs):
+        endpoint_properties = context.get('endpoint_properties', {})
+        backend = endpoint_properties.get('backend', None)
+
+        # Add cache key if Bucket supplied for s3express request
+        bucket_name = context.get('input_params', {}).get('Bucket')
+        if backend == 'S3Express' and bucket_name is not None:
+            context.setdefault('signing', {})
+            context['signing']['cache_key'] = bucket_name
+
+    def resolve_s3express_identity(
+        self,
+        request,
+        signing_name,
+        region_name,
+        signature_version,
+        request_signer,
+        operation_name,
+        **kwargs,
+    ):
+        signing_context = request.context.get('signing', {})
+        signing_name = signing_context.get('signing_name')
+        if signing_name == 's3express' and signature_version.startswith(
+            'v4-s3express'
+        ):
+            signing_context['identity_cache'] = self._cache
+            if 'cache_key' not in signing_context:
+                signing_context['cache_key'] = (
+                    request.context.get('s3_redirect', {})
+                    .get('params', {})
+                    .get('Bucket')
+                )
+
+
+class S3RegionRedirectorv2:
+    """Updated version of S3RegionRedirector for use when
+    EndpointRulesetResolver is in use for endpoint resolution.
+
+    This class is considered private and subject to abrupt breaking changes or
+    removal without prior announcement. Please do not use it directly.
+    """
+
+    def __init__(self, endpoint_bridge, client, cache=None):
+        self._cache = cache or {}
+        self._client = weakref.proxy(client)
+
+    def register(self, event_emitter=None):
+        logger.debug('Registering S3 region redirector handler')
+        emitter = event_emitter or self._client.meta.events
+        emitter.register('needs-retry.s3', self.redirect_from_error)
+        emitter.register(
+            'before-parameter-build.s3', self.annotate_request_context
+        )
+        emitter.register(
+            'before-endpoint-resolution.s3', self.redirect_from_cache
+        )
+
+    def redirect_from_error(self, request_dict, response, operation, **kwargs):
+        """
+        An S3 request sent to the wrong region will return an error that
+        contains the endpoint the request should be sent to. This handler
+        will add the redirect information to the signing context and then
+        redirect the request.
+        """
+        if response is None:
+            # This could be none if there was a ConnectionError or other
+            # transport error.
+            return
+
+        redirect_ctx = request_dict.get('context', {}).get('s3_redirect', {})
+        if ArnParser.is_arn(redirect_ctx.get('bucket')):
+            logger.debug(
+                'S3 request was previously for an Accesspoint ARN, not '
+                'redirecting.'
+            )
+            return
+
+        if redirect_ctx.get('redirected'):
+            logger.debug(
+                'S3 request was previously redirected, not redirecting.'
+            )
+            return
+
+        error = response[1].get('Error', {})
+        error_code = error.get('Code')
+        response_metadata = response[1].get('ResponseMetadata', {})
+
+        # We have to account for 400 responses because
+        # if we sign a Head* request with the wrong region,
+        # we'll get a 400 Bad Request but we won't get a
+        # body saying it's an "AuthorizationHeaderMalformed".
+        is_special_head_object = (
+            error_code in ('301', '400') and operation.name == 'HeadObject'
+        )
+        is_special_head_bucket = (
+            error_code in ('301', '400')
+            and operation.name == 'HeadBucket'
+            and 'x-amz-bucket-region'
+            in response_metadata.get('HTTPHeaders', {})
+        )
+        is_wrong_signing_region = (
+            error_code == 'AuthorizationHeaderMalformed' and 'Region' in error
+        )
+        is_redirect_status = response[0] is not None and response[
+            0
+        ].status_code in (301, 302, 307)
+        is_permanent_redirect = error_code == 'PermanentRedirect'
+        is_opt_in_region_redirect = (
+            error_code == 'IllegalLocationConstraintException'
+            and operation.name != 'CreateBucket'
+        )
+        if not any(
+            [
+                is_special_head_object,
+                is_wrong_signing_region,
+                is_permanent_redirect,
+                is_special_head_bucket,
+                is_redirect_status,
+                is_opt_in_region_redirect,
+            ]
+        ):
+            return
+
+        bucket = request_dict['context']['s3_redirect']['bucket']
+        client_region = request_dict['context'].get('client_region')
+        new_region = self.get_bucket_region(bucket, response)
+
+        if new_region is None:
+            logger.debug(
+                "S3 client configured for region %s but the "
+                "bucket %s is not in that region and the proper region "
+                "could not be automatically determined.",
+                client_region,
+                bucket,
+            )
+            return
+
+        logger.debug(
+            "S3 client configured for region %s but the bucket %s "
+            "is in region %s; Please configure the proper region to "
+            "avoid multiple unnecessary redirects and signing attempts.",
+            client_region,
+            bucket,
+            new_region,
+        )
+        # Adding the new region to _cache will make construct_endpoint() use
+        # the new region as the value for the AWS::Region builtin parameter.
+        self._cache[bucket] = new_region
+
+        # Re-resolve endpoint with new region and modify request_dict with
+        # the new URL, auth scheme, and signing context.
+        ep_resolver = self._client._ruleset_resolver
+        ep_info = ep_resolver.construct_endpoint(
+            operation_model=operation,
+            call_args=request_dict['context']['s3_redirect']['params'],
+            request_context=request_dict['context'],
+        )
+        request_dict['url'] = self.set_request_url(
+            request_dict['url'], ep_info.url
+        )
+        request_dict['context']['s3_redirect']['redirected'] = True
+        auth_schemes = ep_info.properties.get('authSchemes')
+        if auth_schemes is not None:
+            auth_info = ep_resolver.auth_schemes_to_signing_ctx(auth_schemes)
+            auth_type, signing_context = auth_info
+            request_dict['context']['auth_type'] = auth_type
+            request_dict['context']['signing'] = {
+                **request_dict['context'].get('signing', {}),
+                **signing_context,
+            }
+
+        # Return 0 so it doesn't wait to retry
+        return 0
+
+    def get_bucket_region(self, bucket, response):
+        """
+        There are multiple potential sources for the new region to redirect to,
+        but they aren't all universally available for use. This will try to
+        find region from response elements, but will fall back to calling
+        HEAD on the bucket if all else fails.
+
+        :param bucket: The bucket to find the region for. This is necessary if
+            the region is not available in the error response.
+        :param response: A response representing a service request that failed
+            due to incorrect region configuration.
+        """
+        # First try to source the region from the headers.
+        service_response = response[1]
+        response_headers = service_response['ResponseMetadata']['HTTPHeaders']
+        if 'x-amz-bucket-region' in response_headers:
+            return response_headers['x-amz-bucket-region']
+
+        # Next, check the error body
+        region = service_response.get('Error', {}).get('Region', None)
+        if region is not None:
+            return region
+
+        # Finally, HEAD the bucket. No other choice sadly.
+        try:
+            response = self._client.head_bucket(Bucket=bucket)
+            headers = response['ResponseMetadata']['HTTPHeaders']
+        except ClientError as e:
+            headers = e.response['ResponseMetadata']['HTTPHeaders']
+
+        region = headers.get('x-amz-bucket-region', None)
+        return region
+
+    def set_request_url(self, old_url, new_endpoint, **kwargs):
+        """
+        Splice a new endpoint into an existing URL. Note that some endpoints
+        from the endpoint provider have a path component which will be
+        discarded by this function.
+        """
+        return _get_new_endpoint(old_url, new_endpoint, False)
+
+    def redirect_from_cache(self, builtins, params, **kwargs):
+        """
+        If a bucket name has been redirected before, it is in the cache. This
+        handler will update the AWS::Region endpoint resolver builtin param
+        to use the region from cache instead of the client region to avoid the
+        redirect.
+        """
+        bucket = params.get('Bucket')
+        if bucket is not None and bucket in self._cache:
+            new_region = self._cache.get(bucket)
+            builtins['AWS::Region'] = new_region
+
+    def annotate_request_context(self, params, context, **kwargs):
+        """Store the bucket name in context for later use when redirecting.
+        The bucket name may be an access point ARN or alias.
+        """
+        bucket = params.get('Bucket')
+        context['s3_redirect'] = {
+            'redirected': False,
+            'bucket': bucket,
+            'params': params,
+        }
+
+
+class S3RegionRedirector:
+    """This handler has been replaced by S3RegionRedirectorv2. The original
+    version remains in place for any third-party libraries that import it.
+ """ + + def __init__(self, endpoint_bridge, client, cache=None): + self._endpoint_resolver = endpoint_bridge + self._cache = cache + if self._cache is None: + self._cache = {} + + # This needs to be a weak ref in order to prevent memory leaks on + # python 2.6 + self._client = weakref.proxy(client) + + warnings.warn( + 'The S3RegionRedirector class has been deprecated for a new ' + 'internal replacement. A future version of botocore may remove ' + 'this class.', + category=FutureWarning, + ) + + def register(self, event_emitter=None): + emitter = event_emitter or self._client.meta.events + emitter.register('needs-retry.s3', self.redirect_from_error) + emitter.register('before-call.s3', self.set_request_url) + emitter.register('before-parameter-build.s3', self.redirect_from_cache) + + def redirect_from_error(self, request_dict, response, operation, **kwargs): + """ + An S3 request sent to the wrong region will return an error that + contains the endpoint the request should be sent to. This handler + will add the redirect information to the signing context and then + redirect the request. + """ + if response is None: + # This could be none if there was a ConnectionError or other + # transport error. + return + + if self._is_s3_accesspoint(request_dict.get('context', {})): + logger.debug( + 'S3 request was previously to an accesspoint, not redirecting.' + ) + return + + if request_dict.get('context', {}).get('s3_redirected'): + logger.debug( + 'S3 request was previously redirected, not redirecting.' + ) + return + + error = response[1].get('Error', {}) + error_code = error.get('Code') + response_metadata = response[1].get('ResponseMetadata', {}) + + # We have to account for 400 responses because + # if we sign a Head* request with the wrong region, + # we'll get a 400 Bad Request but we won't get a + # body saying it's an "AuthorizationHeaderMalformed". 
+ is_special_head_object = ( + error_code in ('301', '400') and operation.name == 'HeadObject' + ) + is_special_head_bucket = ( + error_code in ('301', '400') + and operation.name == 'HeadBucket' + and 'x-amz-bucket-region' + in response_metadata.get('HTTPHeaders', {}) + ) + is_wrong_signing_region = ( + error_code == 'AuthorizationHeaderMalformed' and 'Region' in error + ) + is_redirect_status = response[0] is not None and response[ + 0 + ].status_code in (301, 302, 307) + is_permanent_redirect = error_code == 'PermanentRedirect' + if not any( + [ + is_special_head_object, + is_wrong_signing_region, + is_permanent_redirect, + is_special_head_bucket, + is_redirect_status, + ] + ): + return + + bucket = request_dict['context']['signing']['bucket'] + client_region = request_dict['context'].get('client_region') + new_region = self.get_bucket_region(bucket, response) + + if new_region is None: + logger.debug( + "S3 client configured for region %s but the bucket %s is not " + "in that region and the proper region could not be " + "automatically determined.", + client_region, + bucket, + ) + return + + logger.debug( + "S3 client configured for region %s but the bucket %s is in region" + " %s; Please configure the proper region to avoid multiple " + "unnecessary redirects and signing attempts.", + client_region, + bucket, + new_region, + ) + endpoint = self._endpoint_resolver.resolve('s3', new_region) + endpoint = endpoint['endpoint_url'] + + signing_context = { + 'region': new_region, + 'bucket': bucket, + 'endpoint': endpoint, + } + request_dict['context']['signing'] = signing_context + + self._cache[bucket] = signing_context + self.set_request_url(request_dict, request_dict['context']) + + request_dict['context']['s3_redirected'] = True + + # Return 0 so it doesn't wait to retry + return 0 + + def get_bucket_region(self, bucket, response): + """ + There are multiple potential sources for the new region to redirect to, + but they aren't all universally available for use. This will try to + find region from response elements, but will fall back to calling + HEAD on the bucket if all else fails. + + :param bucket: The bucket to find the region for. This is necessary if + the region is not available in the error response. + :param response: A response representing a service request that failed + due to incorrect region configuration. + """ + # First try to source the region from the headers. + service_response = response[1] + response_headers = service_response['ResponseMetadata']['HTTPHeaders'] + if 'x-amz-bucket-region' in response_headers: + return response_headers['x-amz-bucket-region'] + + # Next, check the error body + region = service_response.get('Error', {}).get('Region', None) + if region is not None: + return region + + # Finally, HEAD the bucket. No other choice sadly. + try: + response = self._client.head_bucket(Bucket=bucket) + headers = response['ResponseMetadata']['HTTPHeaders'] + except ClientError as e: + headers = e.response['ResponseMetadata']['HTTPHeaders'] + + region = headers.get('x-amz-bucket-region', None) + return region + + def set_request_url(self, params, context, **kwargs): + endpoint = context.get('signing', {}).get('endpoint', None) + if endpoint is not None: + params['url'] = _get_new_endpoint(params['url'], endpoint, False) + + def redirect_from_cache(self, params, context, **kwargs): + """ + This handler retrieves a given bucket's signing context from the cache + and adds it into the request context. 
+        """
+        if self._is_s3_accesspoint(context):
+            return
+        bucket = params.get('Bucket')
+        signing_context = self._cache.get(bucket)
+        if signing_context is not None:
+            context['signing'] = signing_context
+        else:
+            context['signing'] = {'bucket': bucket}
+
+    def _is_s3_accesspoint(self, context):
+        return 's3_accesspoint' in context
+
+
+class InvalidArnException(ValueError):
+    pass
+
+
+class ArnParser:
+    def parse_arn(self, arn):
+        arn_parts = arn.split(':', 5)
+        if len(arn_parts) < 6:
+            raise InvalidArnException(
+                f'Provided ARN: {arn} must be of the format: '
+                'arn:partition:service:region:account:resource'
+            )
+        return {
+            'partition': arn_parts[1],
+            'service': arn_parts[2],
+            'region': arn_parts[3],
+            'account': arn_parts[4],
+            'resource': arn_parts[5],
+        }
+
+    @staticmethod
+    def is_arn(value):
+        if not isinstance(value, str) or not value.startswith('arn:'):
+            return False
+        arn_parser = ArnParser()
+        try:
+            arn_parser.parse_arn(value)
+            return True
+        except InvalidArnException:
+            return False
+
+
+class S3ArnParamHandler:
+    _RESOURCE_REGEX = re.compile(
+        r'^(?P<resource_type>accesspoint|outpost)[/:](?P<resource_name>.+)$'
+    )
+    _OUTPOST_RESOURCE_REGEX = re.compile(
+        r'^(?P<outpost_name>[a-zA-Z0-9\-]{1,63})[/:]accesspoint[/:]'
+        r'(?P<accesspoint_name>[a-zA-Z0-9\-]{1,63}$)'
+    )
+    _BLACKLISTED_OPERATIONS = ['CreateBucket']
+
+    def __init__(self, arn_parser=None):
+        self._arn_parser = arn_parser
+        if arn_parser is None:
+            self._arn_parser = ArnParser()
+
+    def register(self, event_emitter):
+        event_emitter.register('before-parameter-build.s3', self.handle_arn)
+
+    def handle_arn(self, params, model, context, **kwargs):
+        if model.name in self._BLACKLISTED_OPERATIONS:
+            return
+        arn_details = self._get_arn_details_from_bucket_param(params)
+        if arn_details is None:
+            return
+        if arn_details['resource_type'] == 'accesspoint':
+            self._store_accesspoint(params, context, arn_details)
+        elif arn_details['resource_type'] == 'outpost':
+            self._store_outpost(params, context, arn_details)
+
+    def _get_arn_details_from_bucket_param(self, params):
+        if 'Bucket' in params:
+            try:
+                arn = params['Bucket']
+                arn_details = self._arn_parser.parse_arn(arn)
+                self._add_resource_type_and_name(arn, arn_details)
+                return arn_details
+            except InvalidArnException:
+                pass
+        return None
+
+    def _add_resource_type_and_name(self, arn, arn_details):
+        match = self._RESOURCE_REGEX.match(arn_details['resource'])
+        if match:
+            arn_details['resource_type'] = match.group('resource_type')
+            arn_details['resource_name'] = match.group('resource_name')
+        else:
+            raise UnsupportedS3ArnError(arn=arn)
+
+    def _store_accesspoint(self, params, context, arn_details):
+        # Ideally the access-point would be stored as a parameter in the
+        # request where the serializer would then know how to serialize it,
+        # but access-points are not modeled in S3 operations so it would fail
+        # validation. Instead, we set the access point name as the bucket
+        # parameter so that something is set when serializing the request,
+        # and we store the additional information from the arn on the context
+        # for use in forming the access-point endpoint.
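+        # Illustrative ARN (example values only):
+        #     arn:aws:s3:us-west-2:123456789012:accesspoint/my-endpoint
+        # results in params['Bucket'] = 'my-endpoint', with the remaining
+        # details recorded under context['s3_accesspoint'].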
+        params['Bucket'] = arn_details['resource_name']
+        context['s3_accesspoint'] = {
+            'name': arn_details['resource_name'],
+            'account': arn_details['account'],
+            'partition': arn_details['partition'],
+            'region': arn_details['region'],
+            'service': arn_details['service'],
+        }
+
+    def _store_outpost(self, params, context, arn_details):
+        resource_name = arn_details['resource_name']
+        match = self._OUTPOST_RESOURCE_REGEX.match(resource_name)
+        if not match:
+            raise UnsupportedOutpostResourceError(resource_name=resource_name)
+        # Because we need to set the bucket name to something to pass
+        # validation we're going to use the access point name to be consistent
+        # with normal access point arns.
+        accesspoint_name = match.group('accesspoint_name')
+        params['Bucket'] = accesspoint_name
+        context['s3_accesspoint'] = {
+            'outpost_name': match.group('outpost_name'),
+            'name': accesspoint_name,
+            'account': arn_details['account'],
+            'partition': arn_details['partition'],
+            'region': arn_details['region'],
+            'service': arn_details['service'],
+        }
+
+
+class S3EndpointSetter:
+    _DEFAULT_PARTITION = 'aws'
+    _DEFAULT_DNS_SUFFIX = 'amazonaws.com'
+
+    def __init__(
+        self,
+        endpoint_resolver,
+        region=None,
+        s3_config=None,
+        endpoint_url=None,
+        partition=None,
+        use_fips_endpoint=False,
+    ):
+        # This is calling the endpoint_resolver in regions.py
+        self._endpoint_resolver = endpoint_resolver
+        self._region = region
+        self._s3_config = s3_config
+        self._use_fips_endpoint = use_fips_endpoint
+        if s3_config is None:
+            self._s3_config = {}
+        self._endpoint_url = endpoint_url
+        self._partition = partition
+        if partition is None:
+            self._partition = self._DEFAULT_PARTITION
+
+    def register(self, event_emitter):
+        event_emitter.register('before-sign.s3', self.set_endpoint)
+        event_emitter.register('choose-signer.s3', self.set_signer)
+        event_emitter.register(
+            'before-call.s3.WriteGetObjectResponse',
+            self.update_endpoint_to_s3_object_lambda,
+        )
+
+    def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs):
+        if self._use_accelerate_endpoint:
+            raise UnsupportedS3ConfigurationError(
+                msg='S3 client does not support accelerate endpoints for S3 Object Lambda operations',
+            )
+
+        self._override_signing_name(context, 's3-object-lambda')
+        if self._endpoint_url:
+            # An explicit url was provided, so don't update it.
+            return
+
+        resolver = self._endpoint_resolver
+        # Construct the endpoint using s3-object-lambda as the service
+        # and the client's region.
+        resolved = resolver.construct_endpoint(
+            's3-object-lambda', self._region
+        )
+
+        # Ideally we would be able to replace the endpoint before
+        # serialization but there's no event to do that currently.
+        # host_prefix carries the arn/bucket specific labels.
+        new_endpoint = 'https://{host_prefix}{hostname}'.format(
+            host_prefix=params['host_prefix'],
+            hostname=resolved['hostname'],
+        )
+
+        params['url'] = _get_new_endpoint(params['url'], new_endpoint, False)
+
+    def set_endpoint(self, request, **kwargs):
+        if self._use_accesspoint_endpoint(request):
+            self._validate_accesspoint_supported(request)
+            self._validate_fips_supported(request)
+            self._validate_global_regions(request)
+            region_name = self._resolve_region_for_accesspoint_endpoint(
+                request
+            )
+            self._resolve_signing_name_for_accesspoint_endpoint(request)
+            self._switch_to_accesspoint_endpoint(request, region_name)
+            return
+        if self._use_accelerate_endpoint:
+            if self._use_fips_endpoint:
+                raise UnsupportedS3ConfigurationError(
+                    msg=(
+                        'Client is configured to use the FIPS pseudo-region '
+                        f'for "{self._region}", but S3 Accelerate does not '
+                        'have any FIPS compatible endpoints.'
+                    )
+                )
+            switch_host_s3_accelerate(request=request, **kwargs)
+        if self._s3_addressing_handler:
+            self._s3_addressing_handler(request=request, **kwargs)
+
+    def _use_accesspoint_endpoint(self, request):
+        return 's3_accesspoint' in request.context
+
+    def _validate_fips_supported(self, request):
+        if not self._use_fips_endpoint:
+            return
+        if 'fips' in request.context['s3_accesspoint']['region']:
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg='Invalid ARN, FIPS region not allowed in ARN.'
+            )
+        if 'outpost_name' in request.context['s3_accesspoint']:
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    f'Client is configured to use the FIPS pseudo-region "{self._region}", '
+                    'but outpost ARNs do not support FIPS endpoints.'
+                )
+            )
+        # The region from the ARN is an actual region, never a pseudo-region.
+        accesspoint_region = request.context['s3_accesspoint']['region']
+        if accesspoint_region != self._region:
+            if not self._s3_config.get('use_arn_region', True):
+                # TODO: Update message to reflect use_arn_region
+                # is not set
+                raise UnsupportedS3AccesspointConfigurationError(
+                    msg=(
+                        'Client is configured to use the FIPS pseudo-region '
+                        f'for "{self._region}", but the access-point ARN provided is for '
+                        f'the "{accesspoint_region}" region. For clients using a FIPS '
+                        'pseudo-region, calls to access-point ARNs in another '
+                        'region are not allowed.'
+                    )
+                )
+
+    def _validate_global_regions(self, request):
+        if self._s3_config.get('use_arn_region', True):
+            return
+        if self._region in ['aws-global', 's3-external-1']:
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    'Client is configured to use the global pseudo-region '
+                    f'"{self._region}". When providing access-point ARNs a regional '
+                    'endpoint must be specified.'
+                )
+            )
+
+    def _validate_accesspoint_supported(self, request):
+        if self._use_accelerate_endpoint:
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    'Client does not support s3 accelerate configuration '
+                    'when an access-point ARN is specified.'
+                )
+            )
+        request_partition = request.context['s3_accesspoint']['partition']
+        if request_partition != self._partition:
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    f'Client is configured for "{self._partition}" partition, but access-point'
+                    f' ARN provided is for "{request_partition}" partition. The client and '
+                    'access-point partition must be the same.'
+                )
+            )
+        s3_service = request.context['s3_accesspoint'].get('service')
+        if s3_service == 's3-object-lambda' and self._s3_config.get(
+            'use_dualstack_endpoint'
+        ):
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    'Client does not support s3 dualstack configuration '
+                    'when an S3 Object Lambda access point ARN is specified.'
+                )
+            )
+        outpost_name = request.context['s3_accesspoint'].get('outpost_name')
+        if outpost_name and self._s3_config.get('use_dualstack_endpoint'):
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    'Client does not support s3 dualstack configuration '
+                    'when an outpost ARN is specified.'
+                )
+            )
+        self._validate_mrap_s3_config(request)
+
+    def _validate_mrap_s3_config(self, request):
+        if not is_global_accesspoint(request.context):
+            return
+        if self._s3_config.get('s3_disable_multiregion_access_points'):
+            raise UnsupportedS3AccesspointConfigurationError(
+                msg=(
+                    'Invalid configuration, Multi-Region Access Point '
+                    'ARNs are disabled.'
+ ) + ) + elif self._s3_config.get('use_dualstack_endpoint'): + raise UnsupportedS3AccesspointConfigurationError( + msg=( + 'Client does not support s3 dualstack configuration ' + 'when a Multi-Region Access Point ARN is specified.' + ) + ) + + def _resolve_region_for_accesspoint_endpoint(self, request): + if is_global_accesspoint(request.context): + # Requests going to MRAP endpoints MUST be set to any (*) region. + self._override_signing_region(request, '*') + elif self._s3_config.get('use_arn_region', True): + accesspoint_region = request.context['s3_accesspoint']['region'] + # If we are using the region from the access point, + # we will also want to make sure that we set it as the + # signing region as well + self._override_signing_region(request, accesspoint_region) + return accesspoint_region + return self._region + + def set_signer(self, context, **kwargs): + if is_global_accesspoint(context): + if HAS_CRT: + return 's3v4a' + else: + raise MissingDependencyException( + msg="Using S3 with an MRAP arn requires an additional " + "dependency. You will need to pip install " + "botocore[crt] before proceeding." + ) + + def _resolve_signing_name_for_accesspoint_endpoint(self, request): + accesspoint_service = request.context['s3_accesspoint']['service'] + self._override_signing_name(request.context, accesspoint_service) + + def _switch_to_accesspoint_endpoint(self, request, region_name): + original_components = urlsplit(request.url) + accesspoint_endpoint = urlunsplit( + ( + original_components.scheme, + self._get_netloc(request.context, region_name), + self._get_accesspoint_path( + original_components.path, request.context + ), + original_components.query, + '', + ) + ) + logger.debug( + 'Updating URI from %s to %s', request.url, accesspoint_endpoint + ) + request.url = accesspoint_endpoint + + def _get_netloc(self, request_context, region_name): + if is_global_accesspoint(request_context): + return self._get_mrap_netloc(request_context) + else: + return self._get_accesspoint_netloc(request_context, region_name) + + def _get_mrap_netloc(self, request_context): + s3_accesspoint = request_context['s3_accesspoint'] + region_name = 's3-global' + mrap_netloc_components = [s3_accesspoint['name']] + if self._endpoint_url: + endpoint_url_netloc = urlsplit(self._endpoint_url).netloc + mrap_netloc_components.append(endpoint_url_netloc) + else: + partition = s3_accesspoint['partition'] + mrap_netloc_components.extend( + [ + 'accesspoint', + region_name, + self._get_partition_dns_suffix(partition), + ] + ) + return '.'.join(mrap_netloc_components) + + def _get_accesspoint_netloc(self, request_context, region_name): + s3_accesspoint = request_context['s3_accesspoint'] + accesspoint_netloc_components = [ + '{}-{}'.format(s3_accesspoint['name'], s3_accesspoint['account']), + ] + outpost_name = s3_accesspoint.get('outpost_name') + if self._endpoint_url: + if outpost_name: + accesspoint_netloc_components.append(outpost_name) + endpoint_url_netloc = urlsplit(self._endpoint_url).netloc + accesspoint_netloc_components.append(endpoint_url_netloc) + else: + if outpost_name: + outpost_host = [outpost_name, 's3-outposts'] + accesspoint_netloc_components.extend(outpost_host) + elif s3_accesspoint['service'] == 's3-object-lambda': + component = self._inject_fips_if_needed( + 's3-object-lambda', request_context + ) + accesspoint_netloc_components.append(component) + else: + component = self._inject_fips_if_needed( + 's3-accesspoint', request_context + ) + accesspoint_netloc_components.append(component) + if 
self._s3_config.get('use_dualstack_endpoint'):
+            accesspoint_netloc_components.append('dualstack')
+        accesspoint_netloc_components.extend(
+            [region_name, self._get_dns_suffix(region_name)]
+        )
+        return '.'.join(accesspoint_netloc_components)
+
+    def _inject_fips_if_needed(self, component, request_context):
+        if self._use_fips_endpoint:
+            return f'{component}-fips'
+        return component
+
+    def _get_accesspoint_path(self, original_path, request_context):
+        # The Bucket parameter was substituted with the access-point name as
+        # some value was required in serializing the bucket name. Now that
+        # we are making the request directly to the access point, we will
+        # want to remove that access-point name from the path.
+        name = request_context['s3_accesspoint']['name']
+        # All S3 operations require at least a / in their path.
+        return original_path.replace('/' + name, '', 1) or '/'
+
+    def _get_partition_dns_suffix(self, partition_name):
+        dns_suffix = self._endpoint_resolver.get_partition_dns_suffix(
+            partition_name
+        )
+        if dns_suffix is None:
+            dns_suffix = self._DEFAULT_DNS_SUFFIX
+        return dns_suffix
+
+    def _get_dns_suffix(self, region_name):
+        resolved = self._endpoint_resolver.construct_endpoint(
+            's3', region_name
+        )
+        dns_suffix = self._DEFAULT_DNS_SUFFIX
+        if resolved and 'dnsSuffix' in resolved:
+            dns_suffix = resolved['dnsSuffix']
+        return dns_suffix
+
+    def _override_signing_region(self, request, region_name):
+        signing_context = request.context.get('signing', {})
+        # S3SigV4Auth will use the context['signing']['region'] value to
+        # sign with if present. This is used by the Bucket redirector
+        # as well but we should be fine because the redirector is never
+        # used in combination with the accesspoint setting logic.
+        signing_context['region'] = region_name
+        request.context['signing'] = signing_context
+
+    def _override_signing_name(self, context, signing_name):
+        signing_context = context.get('signing', {})
+        # S3SigV4Auth will use the context['signing']['signing_name'] value to
+        # sign with if present. This is used by the Bucket redirector
+        # as well but we should be fine because the redirector is never
+        # used in combination with the accesspoint setting logic.
+        signing_context['signing_name'] = signing_name
+        context['signing'] = signing_context
+
+    @CachedProperty
+    def _use_accelerate_endpoint(self):
+        # Enable accelerate if the configuration is set to true or the
+        # endpoint being used matches one of the accelerate endpoints.
+
+        # Accelerate has been explicitly configured.
+        if self._s3_config.get('use_accelerate_endpoint'):
+            return True
+
+        # Accelerate mode is turned on automatically if an endpoint url is
+        # provided that matches the accelerate scheme.
+        if self._endpoint_url is None:
+            return False
+
+        # Accelerate is only valid for Amazon endpoints.
+        netloc = urlsplit(self._endpoint_url).netloc
+        if not netloc.endswith('amazonaws.com'):
+            return False
+
+        # The first part of the url should always be s3-accelerate.
+        parts = netloc.split('.')
+        if parts[0] != 's3-accelerate':
+            return False
+
+        # Url parts between 's3-accelerate' and 'amazonaws.com' which
+        # represent different url features.
+        feature_parts = parts[1:-2]
+
+        # There should be no duplicate url parts.
+        if len(feature_parts) != len(set(feature_parts)):
+            return False
+
+        # Remaining parts must all be in the whitelist.
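+        # For example, an endpoint of "s3-accelerate.dualstack.amazonaws.com"
+        # yields feature_parts == ['dualstack'], which passes as long as
+        # 'dualstack' appears in S3_ACCELERATE_WHITELIST.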
+ return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts) + + @CachedProperty + def _addressing_style(self): + # Use virtual host style addressing if accelerate is enabled or if + # the given endpoint url is an accelerate endpoint. + if self._use_accelerate_endpoint: + return 'virtual' + + # If a particular addressing style is configured, use it. + configured_addressing_style = self._s3_config.get('addressing_style') + if configured_addressing_style: + return configured_addressing_style + + @CachedProperty + def _s3_addressing_handler(self): + # If virtual host style was configured, use it regardless of whether + # or not the bucket looks dns compatible. + if self._addressing_style == 'virtual': + logger.debug("Using S3 virtual host style addressing.") + return switch_to_virtual_host_style + + # If path style is configured, no additional steps are needed. If + # endpoint_url was specified, don't default to virtual. We could + # potentially default provided endpoint urls to virtual hosted + # style, but for now it is avoided. + if self._addressing_style == 'path' or self._endpoint_url is not None: + logger.debug("Using S3 path style addressing.") + return None + + logger.debug( + "Defaulting to S3 virtual host style addressing with " + "path style addressing fallback." + ) + + # By default, try to use virtual style with path fallback. + return fix_s3_host + + +class S3ControlEndpointSetter: + _DEFAULT_PARTITION = 'aws' + _DEFAULT_DNS_SUFFIX = 'amazonaws.com' + _HOST_LABEL_REGEX = re.compile(r'^[a-zA-Z0-9\-]{1,63}$') + + def __init__( + self, + endpoint_resolver, + region=None, + s3_config=None, + endpoint_url=None, + partition=None, + use_fips_endpoint=False, + ): + self._endpoint_resolver = endpoint_resolver + self._region = region + self._s3_config = s3_config + self._use_fips_endpoint = use_fips_endpoint + if s3_config is None: + self._s3_config = {} + self._endpoint_url = endpoint_url + self._partition = partition + if partition is None: + self._partition = self._DEFAULT_PARTITION + + def register(self, event_emitter): + event_emitter.register('before-sign.s3-control', self.set_endpoint) + + def set_endpoint(self, request, **kwargs): + if self._use_endpoint_from_arn_details(request): + self._validate_endpoint_from_arn_details_supported(request) + region_name = self._resolve_region_from_arn_details(request) + self._resolve_signing_name_from_arn_details(request) + self._resolve_endpoint_from_arn_details(request, region_name) + self._add_headers_from_arn_details(request) + elif self._use_endpoint_from_outpost_id(request): + self._validate_outpost_redirection_valid(request) + self._override_signing_name(request, 's3-outposts') + new_netloc = self._construct_outpost_endpoint(self._region) + self._update_request_netloc(request, new_netloc) + + def _use_endpoint_from_arn_details(self, request): + return 'arn_details' in request.context + + def _use_endpoint_from_outpost_id(self, request): + return 'outpost_id' in request.context + + def _validate_endpoint_from_arn_details_supported(self, request): + if 'fips' in request.context['arn_details']['region']: + raise UnsupportedS3ControlArnError( + arn=request.context['arn_details']['original'], + msg='Invalid ARN, FIPS region not allowed in ARN.', + ) + if not self._s3_config.get('use_arn_region', False): + arn_region = request.context['arn_details']['region'] + if arn_region != self._region: + error_msg = ( + 'The use_arn_region configuration is disabled but ' + f'received arn for "{arn_region}" when the client is configured ' + f'to use 
"{self._region}"' + ) + raise UnsupportedS3ControlConfigurationError(msg=error_msg) + request_partion = request.context['arn_details']['partition'] + if request_partion != self._partition: + raise UnsupportedS3ControlConfigurationError( + msg=( + f'Client is configured for "{self._partition}" partition, but arn ' + f'provided is for "{request_partion}" partition. The client and ' + 'arn partition must be the same.' + ) + ) + if self._s3_config.get('use_accelerate_endpoint'): + raise UnsupportedS3ControlConfigurationError( + msg='S3 control client does not support accelerate endpoints', + ) + if 'outpost_name' in request.context['arn_details']: + self._validate_outpost_redirection_valid(request) + + def _validate_outpost_redirection_valid(self, request): + if self._s3_config.get('use_dualstack_endpoint'): + raise UnsupportedS3ControlConfigurationError( + msg=( + 'Client does not support s3 dualstack configuration ' + 'when an outpost is specified.' + ) + ) + + def _resolve_region_from_arn_details(self, request): + if self._s3_config.get('use_arn_region', False): + arn_region = request.context['arn_details']['region'] + # If we are using the region from the expanded arn, we will also + # want to make sure that we set it as the signing region as well + self._override_signing_region(request, arn_region) + return arn_region + return self._region + + def _resolve_signing_name_from_arn_details(self, request): + arn_service = request.context['arn_details']['service'] + self._override_signing_name(request, arn_service) + return arn_service + + def _resolve_endpoint_from_arn_details(self, request, region_name): + new_netloc = self._resolve_netloc_from_arn_details( + request, region_name + ) + self._update_request_netloc(request, new_netloc) + + def _update_request_netloc(self, request, new_netloc): + original_components = urlsplit(request.url) + arn_details_endpoint = urlunsplit( + ( + original_components.scheme, + new_netloc, + original_components.path, + original_components.query, + '', + ) + ) + logger.debug( + 'Updating URI from %s to %s', request.url, arn_details_endpoint + ) + request.url = arn_details_endpoint + + def _resolve_netloc_from_arn_details(self, request, region_name): + arn_details = request.context['arn_details'] + if 'outpost_name' in arn_details: + return self._construct_outpost_endpoint(region_name) + account = arn_details['account'] + return self._construct_s3_control_endpoint(region_name, account) + + def _is_valid_host_label(self, label): + return self._HOST_LABEL_REGEX.match(label) + + def _validate_host_labels(self, *labels): + for label in labels: + if not self._is_valid_host_label(label): + raise InvalidHostLabelError(label=label) + + def _construct_s3_control_endpoint(self, region_name, account): + self._validate_host_labels(region_name, account) + if self._endpoint_url: + endpoint_url_netloc = urlsplit(self._endpoint_url).netloc + netloc = [account, endpoint_url_netloc] + else: + netloc = [ + account, + 's3-control', + ] + self._add_dualstack(netloc) + dns_suffix = self._get_dns_suffix(region_name) + netloc.extend([region_name, dns_suffix]) + return self._construct_netloc(netloc) + + def _construct_outpost_endpoint(self, region_name): + self._validate_host_labels(region_name) + if self._endpoint_url: + return urlsplit(self._endpoint_url).netloc + else: + netloc = [ + 's3-outposts', + region_name, + self._get_dns_suffix(region_name), + ] + self._add_fips(netloc) + return self._construct_netloc(netloc) + + def _construct_netloc(self, netloc): + return '.'.join(netloc) + 
+ def _add_fips(self, netloc): + if self._use_fips_endpoint: + netloc[0] = netloc[0] + '-fips' + + def _add_dualstack(self, netloc): + if self._s3_config.get('use_dualstack_endpoint'): + netloc.append('dualstack') + + def _get_dns_suffix(self, region_name): + resolved = self._endpoint_resolver.construct_endpoint( + 's3', region_name + ) + dns_suffix = self._DEFAULT_DNS_SUFFIX + if resolved and 'dnsSuffix' in resolved: + dns_suffix = resolved['dnsSuffix'] + return dns_suffix + + def _override_signing_region(self, request, region_name): + signing_context = request.context.get('signing', {}) + # S3SigV4Auth will use the context['signing']['region'] value to + # sign with if present. This is used by the Bucket redirector + # as well but we should be fine because the redirector is never + # used in combination with the accesspoint setting logic. + signing_context['region'] = region_name + request.context['signing'] = signing_context + + def _override_signing_name(self, request, signing_name): + signing_context = request.context.get('signing', {}) + # S3SigV4Auth will use the context['signing']['signing_name'] value to + # sign with if present. This is used by the Bucket redirector + # as well but we should be fine because the redirector is never + # used in combination with the accesspoint setting logic. + signing_context['signing_name'] = signing_name + request.context['signing'] = signing_context + + def _add_headers_from_arn_details(self, request): + arn_details = request.context['arn_details'] + outpost_name = arn_details.get('outpost_name') + if outpost_name: + self._add_outpost_id_header(request, outpost_name) + + def _add_outpost_id_header(self, request, outpost_name): + request.headers['x-amz-outpost-id'] = outpost_name + + +class S3ControlArnParamHandler: + """This handler has been replaced by S3ControlArnParamHandlerv2. The + original version remains in place for any third-party importers. + """ + + _RESOURCE_SPLIT_REGEX = re.compile(r'[/:]') + + def __init__(self, arn_parser=None): + self._arn_parser = arn_parser + if arn_parser is None: + self._arn_parser = ArnParser() + warnings.warn( + 'The S3ControlArnParamHandler class has been deprecated for a new ' + 'internal replacement. 
A future version of botocore may remove ' + 'this class.', + category=FutureWarning, + ) + + def register(self, event_emitter): + event_emitter.register( + 'before-parameter-build.s3-control', + self.handle_arn, + ) + + def handle_arn(self, params, model, context, **kwargs): + if model.name in ('CreateBucket', 'ListRegionalBuckets'): + # CreateBucket and ListRegionalBuckets are special cases that do + # not obey ARN based redirection but will redirect based off of the + # presence of the OutpostId parameter + self._handle_outpost_id_param(params, model, context) + else: + self._handle_name_param(params, model, context) + self._handle_bucket_param(params, model, context) + + def _get_arn_details_from_param(self, params, param_name): + if param_name not in params: + return None + try: + arn = params[param_name] + arn_details = self._arn_parser.parse_arn(arn) + arn_details['original'] = arn + arn_details['resources'] = self._split_resource(arn_details) + return arn_details + except InvalidArnException: + return None + + def _split_resource(self, arn_details): + return self._RESOURCE_SPLIT_REGEX.split(arn_details['resource']) + + def _override_account_id_param(self, params, arn_details): + account_id = arn_details['account'] + if 'AccountId' in params and params['AccountId'] != account_id: + error_msg = ( + 'Account ID in arn does not match the AccountId parameter ' + 'provided: "{}"' + ).format(params['AccountId']) + raise UnsupportedS3ControlArnError( + arn=arn_details['original'], + msg=error_msg, + ) + params['AccountId'] = account_id + + def _handle_outpost_id_param(self, params, model, context): + if 'OutpostId' not in params: + return + context['outpost_id'] = params['OutpostId'] + + def _handle_name_param(self, params, model, context): + # CreateAccessPoint is a special case that does not expand Name + if model.name == 'CreateAccessPoint': + return + arn_details = self._get_arn_details_from_param(params, 'Name') + if arn_details is None: + return + if self._is_outpost_accesspoint(arn_details): + self._store_outpost_accesspoint(params, context, arn_details) + else: + error_msg = 'The Name parameter does not support the provided ARN' + raise UnsupportedS3ControlArnError( + arn=arn_details['original'], + msg=error_msg, + ) + + def _is_outpost_accesspoint(self, arn_details): + if arn_details['service'] != 's3-outposts': + return False + resources = arn_details['resources'] + if len(resources) != 4: + return False + # Resource must be of the form outpost/op-123/accesspoint/name + return resources[0] == 'outpost' and resources[2] == 'accesspoint' + + def _store_outpost_accesspoint(self, params, context, arn_details): + self._override_account_id_param(params, arn_details) + accesspoint_name = arn_details['resources'][3] + params['Name'] = accesspoint_name + arn_details['accesspoint_name'] = accesspoint_name + arn_details['outpost_name'] = arn_details['resources'][1] + context['arn_details'] = arn_details + + def _handle_bucket_param(self, params, model, context): + arn_details = self._get_arn_details_from_param(params, 'Bucket') + if arn_details is None: + return + if self._is_outpost_bucket(arn_details): + self._store_outpost_bucket(params, context, arn_details) + else: + error_msg = ( + 'The Bucket parameter does not support the provided ARN' + ) + raise UnsupportedS3ControlArnError( + arn=arn_details['original'], + msg=error_msg, + ) + + def _is_outpost_bucket(self, arn_details): + if arn_details['service'] != 's3-outposts': + return False + resources = arn_details['resources'] + if 
len(resources) != 4:
+            return False
+        # Resource must be of the form outpost/op-123/bucket/name
+        return resources[0] == 'outpost' and resources[2] == 'bucket'
+
+    def _store_outpost_bucket(self, params, context, arn_details):
+        self._override_account_id_param(params, arn_details)
+        bucket_name = arn_details['resources'][3]
+        params['Bucket'] = bucket_name
+        arn_details['bucket_name'] = bucket_name
+        arn_details['outpost_name'] = arn_details['resources'][1]
+        context['arn_details'] = arn_details
+
+
+class S3ControlArnParamHandlerv2(S3ControlArnParamHandler):
+    """Updated version of S3ControlArnParamHandler for use when
+    EndpointRulesetResolver is in use for endpoint resolution.
+
+    This class is considered private and subject to abrupt breaking changes or
+    removal without prior announcement. Please do not use it directly.
+    """
+
+    def __init__(self, arn_parser=None):
+        self._arn_parser = arn_parser
+        if arn_parser is None:
+            self._arn_parser = ArnParser()
+
+    def register(self, event_emitter):
+        event_emitter.register(
+            'before-endpoint-resolution.s3-control',
+            self.handle_arn,
+        )
+
+    def _handle_name_param(self, params, model, context):
+        # CreateAccessPoint is a special case that does not expand Name
+        if model.name == 'CreateAccessPoint':
+            return
+        arn_details = self._get_arn_details_from_param(params, 'Name')
+        if arn_details is None:
+            return
+        self._raise_for_fips_pseudo_region(arn_details)
+        self._raise_for_accelerate_endpoint(context)
+        if self._is_outpost_accesspoint(arn_details):
+            self._store_outpost_accesspoint(params, context, arn_details)
+        else:
+            error_msg = 'The Name parameter does not support the provided ARN'
+            raise UnsupportedS3ControlArnError(
+                arn=arn_details['original'],
+                msg=error_msg,
+            )
+
+    def _store_outpost_accesspoint(self, params, context, arn_details):
+        self._override_account_id_param(params, arn_details)
+
+    def _handle_bucket_param(self, params, model, context):
+        arn_details = self._get_arn_details_from_param(params, 'Bucket')
+        if arn_details is None:
+            return
+        self._raise_for_fips_pseudo_region(arn_details)
+        self._raise_for_accelerate_endpoint(context)
+        if self._is_outpost_bucket(arn_details):
+            self._store_outpost_bucket(params, context, arn_details)
+        else:
+            error_msg = (
+                'The Bucket parameter does not support the provided ARN'
+            )
+            raise UnsupportedS3ControlArnError(
+                arn=arn_details['original'],
+                msg=error_msg,
+            )
+
+    def _store_outpost_bucket(self, params, context, arn_details):
+        self._override_account_id_param(params, arn_details)
+
+    def _raise_for_fips_pseudo_region(self, arn_details):
+        # FIPS pseudo region names cannot be used in ARNs
+        arn_region = arn_details['region']
+        if arn_region.startswith('fips-') or arn_region.endswith('-fips'):
+            raise UnsupportedS3ControlArnError(
+                arn=arn_details['original'],
+                msg='Invalid ARN, FIPS region not allowed in ARN.',
+            )
+
+    def _raise_for_accelerate_endpoint(self, context):
+        s3_config = context['client_config'].s3 or {}
+        if s3_config.get('use_accelerate_endpoint'):
+            raise UnsupportedS3ControlConfigurationError(
+                msg='S3 control client does not support accelerate endpoints',
+            )
+
+
+class ContainerMetadataFetcher:
+    TIMEOUT_SECONDS = 2
+    RETRY_ATTEMPTS = 3
+    SLEEP_TIME = 1
+    IP_ADDRESS = '169.254.170.2'
+    _ALLOWED_HOSTS = [
+        IP_ADDRESS,
+        '169.254.170.23',
+        'fd00:ec2::23',
+        'localhost',
+    ]
+
+    def __init__(self, session=None, sleep=time.sleep):
+        if session is None:
+            session = botocore.httpsession.URLLib3Session(
+                timeout=self.TIMEOUT_SECONDS
+            )
+        self._session = 
session + self._sleep = sleep + + def retrieve_full_uri(self, full_url, headers=None): + """Retrieve JSON metadata from container metadata. + + :type full_url: str + :param full_url: The full URL of the metadata service. + This should include the scheme as well, e.g + "http://localhost:123/foo" + + """ + self._validate_allowed_url(full_url) + return self._retrieve_credentials(full_url, headers) + + def _validate_allowed_url(self, full_url): + parsed = botocore.compat.urlparse(full_url) + + if parsed.scheme == 'https': + return + if self._is_loopback_address(parsed.hostname): + return + is_whitelisted_host = self._check_if_whitelisted_host(parsed.hostname) + if not is_whitelisted_host: + raise ValueError( + f"Unsupported host '{parsed.hostname}'. Can only retrieve metadata " + f"from a loopback address or one of these hosts: {', '.join(self._ALLOWED_HOSTS)}" + ) + + def _is_loopback_address(self, hostname): + try: + ip = ip_address(hostname) + return ip.is_loopback + except ValueError: + return False + + def _check_if_whitelisted_host(self, host): + if host in self._ALLOWED_HOSTS: + return True + return False + + def retrieve_uri(self, relative_uri): + """Retrieve JSON metadata from container metadata. + + :type relative_uri: str + :param relative_uri: A relative URI, e.g "/foo/bar?id=123" + + :return: The parsed JSON response. + + """ + full_url = self.full_url(relative_uri) + return self._retrieve_credentials(full_url) + + def _retrieve_credentials(self, full_url, extra_headers=None): + headers = {'Accept': 'application/json'} + if extra_headers is not None: + headers.update(extra_headers) + attempts = 0 + while True: + try: + return self._get_response( + full_url, headers, self.TIMEOUT_SECONDS + ) + except MetadataRetrievalError as e: + logger.debug( + "Received error when attempting to retrieve " + "container metadata: %s", + e, + exc_info=True, + ) + self._sleep(self.SLEEP_TIME) + attempts += 1 + if attempts >= self.RETRY_ATTEMPTS: + raise + + def _get_response(self, full_url, headers, timeout): + try: + AWSRequest = botocore.awsrequest.AWSRequest + request = AWSRequest(method='GET', url=full_url, headers=headers) + response = self._session.send(request.prepare()) + response_text = response.content.decode('utf-8') + if response.status_code != 200: + raise MetadataRetrievalError( + error_msg=( + f"Received non 200 response {response.status_code} " + f"from container metadata: {response_text}" + ) + ) + try: + return json.loads(response_text) + except ValueError: + error_msg = "Unable to parse JSON returned from container metadata services" + logger.debug('%s:%s', error_msg, response_text) + raise MetadataRetrievalError(error_msg=error_msg) + except RETRYABLE_HTTP_ERRORS as e: + error_msg = ( + "Received error when attempting to retrieve " + f"container metadata: {e}" + ) + raise MetadataRetrievalError(error_msg=error_msg) + + def full_url(self, relative_uri): + return f'http://{self.IP_ADDRESS}{relative_uri}' + + +def get_environ_proxies(url): + if should_bypass_proxies(url): + return {} + else: + return getproxies() + + +def should_bypass_proxies(url): + """ + Returns whether we should bypass proxies or not. + """ + # NOTE: requests allowed for ip/cidr entries in no_proxy env that we don't + # support current as urllib only checks DNS suffix + # If the system proxy settings indicate that this URL should be bypassed, + # don't proxy. + # The proxy_bypass function is incredibly buggy on OS X in early versions + # of Python 2.6, so allow this call to fail. 
Only catch the specific
+    # exceptions we've seen, though: this call failing in other ways can reveal
+    # legitimate problems.
+    try:
+        if proxy_bypass(urlparse(url).netloc):
+            return True
+    except (TypeError, socket.gaierror):
+        pass
+
+    return False
+
+
+def determine_content_length(body):
+    # No body, content length of 0
+    if not body:
+        return 0
+
+    # Try asking the body for its length
+    try:
+        return len(body)
+    except (AttributeError, TypeError):
+        pass
+
+    # Try getting the length from a seekable stream
+    if hasattr(body, 'seek') and hasattr(body, 'tell'):
+        try:
+            orig_pos = body.tell()
+            body.seek(0, 2)
+            end_file_pos = body.tell()
+            body.seek(orig_pos)
+            return end_file_pos - orig_pos
+        except io.UnsupportedOperation:
+            # in case when body is, for example, io.BufferedIOBase object
+            # it has "seek" method which throws "UnsupportedOperation"
+            # exception in such case we want to fall back to "chunked"
+            # encoding
+            pass
+    # Failed to determine the length
+    return None
+
+
+def get_encoding_from_headers(headers, default='ISO-8859-1'):
+    """Returns encodings from given HTTP Header Dict.
+
+    :param headers: dictionary to extract encoding from.
+    :param default: default encoding if the content-type is text
+    """
+
+    content_type = headers.get('content-type')
+
+    if not content_type:
+        return None
+
+    message = email.message.Message()
+    message['content-type'] = content_type
+    charset = message.get_param("charset")
+
+    if charset is not None:
+        return charset
+
+    if 'text' in content_type:
+        return default
+
+
+def calculate_md5(body, **kwargs):
+    """This function has been deprecated, but is kept for backwards compatibility."""
+    if isinstance(body, (bytes, bytearray)):
+        binary_md5 = _calculate_md5_from_bytes(body)
+    else:
+        binary_md5 = _calculate_md5_from_file(body)
+    return base64.b64encode(binary_md5).decode('ascii')
+
+
+def _calculate_md5_from_bytes(body_bytes):
+    """This function has been deprecated, but is kept for backwards compatibility."""
+    md5 = get_md5(body_bytes, usedforsecurity=False)
+    return md5.digest()
+
+
+def _calculate_md5_from_file(fileobj):
+    """This function has been deprecated, but is kept for backwards compatibility."""
+    start_position = fileobj.tell()
+    md5 = get_md5(usedforsecurity=False)
+    for chunk in iter(lambda: fileobj.read(1024 * 1024), b''):
+        md5.update(chunk)
+    fileobj.seek(start_position)
+    return md5.digest()
+
+
+def _is_s3express_request(params):
+    endpoint_properties = params.get('context', {}).get(
+        'endpoint_properties', {}
+    )
+    return endpoint_properties.get('backend') == 'S3Express'
+
+
+def has_checksum_header(params):
+    """
+    Checks if a header starting with "x-amz-checksum-" is provided in a request.
+
+    This function is considered private and subject to abrupt breaking changes or
+    removal without prior announcement. Please do not use it directly.
+    """
+    headers = params['headers']
+
+    # If a header matching the x-amz-checksum-* pattern is present, we
+    # assume a checksum has already been provided by the user.
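+    # e.g. a request whose headers include "x-amz-checksum-crc32" (the
+    # header name set by conditionally_enable_crc32 below) matches here.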
+ for header in headers: + if CHECKSUM_HEADER_PATTERN.match(header): + return True + + return False + + +def conditionally_calculate_checksum(params, **kwargs): + """This function has been deprecated, but is kept for backwards compatibility.""" + if not has_checksum_header(params): + conditionally_calculate_md5(params, **kwargs) + conditionally_enable_crc32(params, **kwargs) + + +def conditionally_enable_crc32(params, **kwargs): + """This function has been deprecated, but is kept for backwards compatibility.""" + checksum_context = params.get('context', {}).get('checksum', {}) + checksum_algorithm = checksum_context.get('request_algorithm') + if ( + _is_s3express_request(params) + and params['body'] is not None + and checksum_algorithm in (None, "conditional-md5") + ): + params['context']['checksum'] = { + 'request_algorithm': { + 'algorithm': 'crc32', + 'in': 'header', + 'name': 'x-amz-checksum-crc32', + } + } + + +def conditionally_calculate_md5(params, **kwargs): + """Only add a Content-MD5 if the system supports it. + + This function has been deprecated, but is kept for backwards compatibility. + """ + body = params['body'] + checksum_context = params.get('context', {}).get('checksum', {}) + checksum_algorithm = checksum_context.get('request_algorithm') + if checksum_algorithm and checksum_algorithm != 'conditional-md5': + # Skip for requests that will have a flexible checksum applied + return + + if has_checksum_header(params): + # Don't add a new header if one is already available. + return + + if _is_s3express_request(params): + # S3Express doesn't support MD5 + return + + if MD5_AVAILABLE and body is not None: + md5_digest = calculate_md5(body, **kwargs) + params['headers']['Content-MD5'] = md5_digest + + +class FileWebIdentityTokenLoader: + def __init__(self, web_identity_token_path, _open=open): + self._web_identity_token_path = web_identity_token_path + self._open = _open + + def __call__(self): + with self._open(self._web_identity_token_path) as token_file: + return token_file.read() + + +class SSOTokenLoader: + def __init__(self, cache=None): + if cache is None: + cache = {} + self._cache = cache + + def _generate_cache_key(self, start_url, session_name): + input_str = start_url + if session_name is not None: + input_str = session_name + return hashlib.sha1(input_str.encode('utf-8')).hexdigest() + + def save_token(self, start_url, token, session_name=None): + cache_key = self._generate_cache_key(start_url, session_name) + self._cache[cache_key] = token + + def __call__(self, start_url, session_name=None): + cache_key = self._generate_cache_key(start_url, session_name) + logger.debug('Checking for cached token at: %s', cache_key) + if cache_key not in self._cache: + name = start_url + if session_name is not None: + name = session_name + error_msg = f'Token for {name} does not exist' + raise SSOTokenLoadError(error_msg=error_msg) + + token = self._cache[cache_key] + if 'accessToken' not in token or 'expiresAt' not in token: + error_msg = f'Token for {start_url} is invalid' + raise SSOTokenLoadError(error_msg=error_msg) + return token + + +class EventbridgeSignerSetter: + _DEFAULT_PARTITION = 'aws' + _DEFAULT_DNS_SUFFIX = 'amazonaws.com' + + def __init__(self, endpoint_resolver, region=None, endpoint_url=None): + self._endpoint_resolver = endpoint_resolver + self._region = region + self._endpoint_url = endpoint_url + + def register(self, event_emitter): + event_emitter.register( + 'before-parameter-build.events.PutEvents', + self.check_for_global_endpoint, + ) + 
event_emitter.register( + 'before-call.events.PutEvents', self.set_endpoint_url + ) + + def set_endpoint_url(self, params, context, **kwargs): + if 'eventbridge_endpoint' in context: + endpoint = context['eventbridge_endpoint'] + logger.debug( + "Rewriting URL from %s to %s", params['url'], endpoint + ) + params['url'] = endpoint + + def check_for_global_endpoint(self, params, context, **kwargs): + endpoint = params.get('EndpointId') + if endpoint is None: + return + + if len(endpoint) == 0: + raise InvalidEndpointConfigurationError( + msg='EndpointId must not be a zero length string' + ) + + if not HAS_CRT: + raise MissingDependencyException( + msg="Using EndpointId requires an additional " + "dependency. You will need to pip install " + "botocore[crt] before proceeding." + ) + + config = context.get('client_config') + endpoint_variant_tags = None + if config is not None: + if config.use_fips_endpoint: + raise InvalidEndpointConfigurationError( + msg="FIPS is not supported with EventBridge " + "multi-region endpoints." + ) + if config.use_dualstack_endpoint: + endpoint_variant_tags = ['dualstack'] + + if self._endpoint_url is None: + # Validate endpoint is a valid hostname component + parts = urlparse(f'https://{endpoint}') + if parts.hostname != endpoint: + raise InvalidEndpointConfigurationError( + msg='EndpointId is not a valid hostname component.' + ) + resolved_endpoint = self._get_global_endpoint( + endpoint, endpoint_variant_tags=endpoint_variant_tags + ) + else: + resolved_endpoint = self._endpoint_url + + context['eventbridge_endpoint'] = resolved_endpoint + context['auth_type'] = 'v4a' + + def _get_global_endpoint(self, endpoint, endpoint_variant_tags=None): + resolver = self._endpoint_resolver + + partition = resolver.get_partition_for_region(self._region) + if partition is None: + partition = self._DEFAULT_PARTITION + dns_suffix = resolver.get_partition_dns_suffix( + partition, endpoint_variant_tags=endpoint_variant_tags + ) + if dns_suffix is None: + dns_suffix = self._DEFAULT_DNS_SUFFIX + + return f"https://{endpoint}.endpoint.events.{dns_suffix}/" + + +def is_s3_accelerate_url(url): + """Does the URL match the S3 Accelerate endpoint scheme? + + Virtual host naming style with bucket names in the netloc part of the URL + are not allowed by this function. + """ + if url is None: + return False + + # Accelerate is only valid for Amazon endpoints. + url_parts = urlsplit(url) + if not url_parts.netloc.endswith( + 'amazonaws.com' + ) or url_parts.scheme not in ['https', 'http']: + return False + + # The first part of the URL must be s3-accelerate. + parts = url_parts.netloc.split('.') + if parts[0] != 's3-accelerate': + return False + + # Url parts between 's3-accelerate' and 'amazonaws.com' which + # represent different url features. + feature_parts = parts[1:-2] + + # There should be no duplicate URL parts. + if len(feature_parts) != len(set(feature_parts)): + return False + + # Remaining parts must all be in the whitelist. + return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts) + + +class JSONFileCache: + """JSON file cache. + This provides a dict like interface that stores JSON serializable + objects. + The objects are serialized to JSON and stored in a file. These + values can be retrieved at a later time. 
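+
+    Illustrative usage (hypothetical key and value; files are written
+    under CACHE_DIR)::
+
+        cache = JSONFileCache()
+        cache['credentials'] = {'token': 'abc123'}
+        assert 'credentials' in cache
+        assert cache['credentials'] == {'token': 'abc123'}
+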
+ """ + + CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache')) + + def __init__(self, working_dir=CACHE_DIR, dumps_func=None): + self._working_dir = working_dir + if dumps_func is None: + dumps_func = self._default_dumps + self._dumps = dumps_func + + def _default_dumps(self, obj): + return json.dumps(obj, default=self._serialize_if_needed) + + def __contains__(self, cache_key): + actual_key = self._convert_cache_key(cache_key) + return os.path.isfile(actual_key) + + def __getitem__(self, cache_key): + """Retrieve value from a cache key.""" + actual_key = self._convert_cache_key(cache_key) + try: + with open(actual_key) as f: + return json.load(f) + except (OSError, ValueError): + raise KeyError(cache_key) + + def __delitem__(self, cache_key): + actual_key = self._convert_cache_key(cache_key) + try: + key_path = Path(actual_key) + key_path.unlink() + except FileNotFoundError: + raise KeyError(cache_key) + + def __setitem__(self, cache_key, value): + full_key = self._convert_cache_key(cache_key) + try: + file_content = self._dumps(value) + except (TypeError, ValueError): + raise ValueError( + f"Value cannot be cached, must be JSON serializable: {value}" + ) + if not os.path.isdir(self._working_dir): + os.makedirs(self._working_dir, exist_ok=True) + + temp_fd, temp_path = tempfile.mkstemp( + dir=self._working_dir, suffix='.tmp' + ) + with os.fdopen(temp_fd, 'w') as f: + f.write(file_content) + f.flush() + os.fsync(f.fileno()) + + os.replace(temp_path, full_key) + + def _convert_cache_key(self, cache_key): + full_path = os.path.join(self._working_dir, cache_key + '.json') + return full_path + + def _serialize_if_needed(self, value, iso=False): + if isinstance(value, _DatetimeClass): + if iso: + return value.isoformat() + return value.strftime('%Y-%m-%dT%H:%M:%S%Z') + return value + + +def generate_login_cache_key(sign_in_session_name): + return hashlib.sha256(sign_in_session_name.encode('utf-8')).hexdigest() + + +def is_s3express_bucket(bucket): + if bucket is None: + return False + return bucket.endswith('--x-s3') + + +def get_token_from_environment(signing_name, environ=None): + if not isinstance(signing_name, str) or not signing_name.strip(): + return None + + if environ is None: + environ = os.environ + env_var = _get_bearer_env_var_name(signing_name) + return environ.get(env_var) + + +def _get_bearer_env_var_name(signing_name): + bearer_name = signing_name.replace('-', '_').replace(' ', '_').upper() + return f"AWS_BEARER_TOKEN_{bearer_name}" + + +# This parameter is not part of the public interface and is subject to abrupt +# breaking changes or removal without prior announcement. +# Mapping of services that have been renamed for backwards compatibility reasons. +# Keys are the previous name that should be allowed, values are the documented +# and preferred client name. +SERVICE_NAME_ALIASES = {'runtime.sagemaker': 'sagemaker-runtime'} + + +# This parameter is not part of the public interface and is subject to abrupt +# breaking changes or removal without prior announcement. +# Mapping to determine the service ID for services that do not use it as the +# model data directory name. The keys are the data directory name and the +# values are the transformed service IDs (lower case and hyphenated). +CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES = { + # Actual service name we use -> Allowed computed service name. 
+    'apigateway': 'api-gateway',
+    'application-autoscaling': 'application-auto-scaling',
+    'appmesh': 'app-mesh',
+    'autoscaling': 'auto-scaling',
+    'autoscaling-plans': 'auto-scaling-plans',
+    'ce': 'cost-explorer',
+    'cloudhsmv2': 'cloudhsm-v2',
+    'cloudsearchdomain': 'cloudsearch-domain',
+    'cognito-idp': 'cognito-identity-provider',
+    'config': 'config-service',
+    'cur': 'cost-and-usage-report-service',
+    'datapipeline': 'data-pipeline',
+    'directconnect': 'direct-connect',
+    'devicefarm': 'device-farm',
+    'discovery': 'application-discovery-service',
+    'dms': 'database-migration-service',
+    'ds': 'directory-service',
+    'ds-data': 'directory-service-data',
+    'dynamodbstreams': 'dynamodb-streams',
+    'elasticbeanstalk': 'elastic-beanstalk',
+    'elb': 'elastic-load-balancing',
+    'elbv2': 'elastic-load-balancing-v2',
+    'es': 'elasticsearch-service',
+    'events': 'eventbridge',
+    'globalaccelerator': 'global-accelerator',
+    'iot-data': 'iot-data-plane',
+    'iot-jobs-data': 'iot-jobs-data-plane',
+    'iotevents-data': 'iot-events-data',
+    'iotevents': 'iot-events',
+    'iotwireless': 'iot-wireless',
+    'kinesisanalytics': 'kinesis-analytics',
+    'kinesisanalyticsv2': 'kinesis-analytics-v2',
+    'kinesisvideo': 'kinesis-video',
+    'lex-models': 'lex-model-building-service',
+    'lexv2-models': 'lex-models-v2',
+    'lex-runtime': 'lex-runtime-service',
+    'lexv2-runtime': 'lex-runtime-v2',
+    'logs': 'cloudwatch-logs',
+    'machinelearning': 'machine-learning',
+    'marketplacecommerceanalytics': 'marketplace-commerce-analytics',
+    'marketplace-entitlement': 'marketplace-entitlement-service',
+    'meteringmarketplace': 'marketplace-metering',
+    'mgh': 'migration-hub',
+    'sms-voice': 'pinpoint-sms-voice',
+    'resourcegroupstaggingapi': 'resource-groups-tagging-api',
+    'route53': 'route-53',
+    'route53domains': 'route-53-domains',
+    's3control': 's3-control',
+    'sdb': 'simpledb',
+    'secretsmanager': 'secrets-manager',
+    'serverlessrepo': 'serverlessapplicationrepository',
+    'servicecatalog': 'service-catalog',
+    'servicecatalog-appregistry': 'service-catalog-appregistry',
+    'stepfunctions': 'sfn',
+    'storagegateway': 'storage-gateway',
+}
+
+
+def get_login_token_cache_directory():
+    """Returns which directory contains the login_session token files"""
+    if 'AWS_LOGIN_CACHE_DIRECTORY' in os.environ:
+        path = os.path.expandvars(os.environ['AWS_LOGIN_CACHE_DIRECTORY'])
+        path = os.path.expanduser(path)
+        return path
+    else:
+        return os.path.expanduser(os.path.join('~', '.aws', 'login', 'cache'))
+
+
+class LoginTokenLoader:
+    """Loads and saves login access tokens to disk"""
+
+    def __init__(self, cache=None):
+        if cache is None:
+            cache = {}
+        self._cache = cache
+
+    def save_token(self, session_name, token):
+        cache_key = generate_login_cache_key(session_name)
+        self._cache[cache_key] = token
+
+    def load_token(self, session_name):
+        cache_key = generate_login_cache_key(session_name)
+        if cache_key not in self._cache:
+            return None
+        return self._cache[cache_key]
diff --git a/py311/lib/python3.11/site-packages/botocore/validate.py b/py311/lib/python3.11/site-packages/botocore/validate.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba9f62fad6d58e37a82d19f4d137b37989e9dacf
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore/validate.py
@@ -0,0 +1,384 @@
+"""User input parameter validation.
+
+This module handles user input parameter validation
+against a provided input model.
+
+Note that the objects in this module do *not* mutate any
+arguments. No type conversion happens here. It is up to another
+layer to properly convert arguments to any required types.
+
+Validation Errors
+-----------------
+
+
+"""
+
+import decimal
+import json
+from datetime import datetime
+
+from botocore.exceptions import ParamValidationError
+from botocore.utils import is_json_value_header, parse_to_aware_datetime
+
+
+def validate_parameters(params, shape):
+    """Validates input parameters against a schema.
+
+    This is a convenience function; you can also instantiate and use the
+    ParamValidator class directly if you want more control.
+
+    If there are any validation errors then a ParamValidationError
+    will be raised. If there are no validation errors then no exception
+    is raised and a value of None is returned.
+
+    :param params: The user provided input parameters.
+
+    :type shape: botocore.model.Shape
+    :param shape: The schema which the input parameters should
+        adhere to.
+
+    :raise: ParamValidationError
+
+    """
+    validator = ParamValidator()
+    report = validator.validate(params, shape)
+    if report.has_errors():
+        raise ParamValidationError(report=report.generate_report())
+
+
+def type_check(valid_types):
+    def _create_type_check_guard(func):
+        def _on_passes_type_check(self, param, shape, errors, name):
+            if _type_check(param, errors, name):
+                return func(self, param, shape, errors, name)
+
+        def _type_check(param, errors, name):
+            if not isinstance(param, valid_types):
+                valid_type_names = [str(t) for t in valid_types]
+                errors.report(
+                    name,
+                    'invalid type',
+                    param=param,
+                    valid_types=valid_type_names,
+                )
+                return False
+            return True
+
+        return _on_passes_type_check
+
+    return _create_type_check_guard
+
+
+def range_check(name, value, shape, error_type, errors):
+    failed = False
+    min_allowed = float('-inf')
+    if 'min' in shape.metadata:
+        min_allowed = shape.metadata['min']
+        if value < min_allowed:
+            failed = True
+    elif hasattr(shape, 'serialization'):
+        # Members that can be bound to the host have an implicit min of 1
+        if shape.serialization.get('hostLabel'):
+            min_allowed = 1
+            if value < min_allowed:
+                failed = True
+    if failed:
+        errors.report(name, error_type, param=value, min_allowed=min_allowed)
+
+
+class ValidationErrors:
+    def __init__(self):
+        self._errors = []
+
+    def has_errors(self):
+        if self._errors:
+            return True
+        return False
+
+    def generate_report(self):
+        error_messages = []
+        for error in self._errors:
+            error_messages.append(self._format_error(error))
+        return '\n'.join(error_messages)
+
+    def _format_error(self, error):
+        error_type, name, additional = error
+        name = self._get_name(name)
+        if error_type == 'missing required field':
+            return (
+                f"Missing required parameter in {name}: "
+                f"\"{additional['required_name']}\""
+            )
+        elif error_type == 'unknown field':
+            unknown_param = additional['unknown_param']
+            valid_names = ', '.join(additional['valid_names'])
+            return (
+                f'Unknown parameter in {name}: "{unknown_param}", '
+                f'must be one of: {valid_names}'
+            )
+        elif error_type == 'invalid type':
+            param = additional['param']
+            param_type = type(param)
+            valid_types = ', '.join(additional['valid_types'])
+            return (
+                f'Invalid type for parameter {name}, value: {param}, '
+                f'type: {param_type}, valid types: {valid_types}'
+            )
+        elif error_type == 'invalid range':
+            param = additional['param']
+            min_allowed = additional['min_allowed']
+            return (
+                f'Invalid value for parameter {name}, value: {param}, '
+                f'valid min value: {min_allowed}'
+            )
+        elif error_type == 'invalid length':
+            param = additional['param']
+            min_allowed = additional['min_allowed']
+            return (
+                f'Invalid length for parameter {name}, value: {param}, '
+                f'valid min length: {min_allowed}'
+            )
+        elif error_type == 'unable to encode to json':
+            return 'Invalid parameter {} must be json serializable: {}'.format(
+                name,
+                additional['type_error'],
+            )
+        elif error_type == 'invalid type for document':
+            param = additional['param']
+            param_type = type(param)
+            valid_types = ', '.join(additional['valid_types'])
+            return (
+                f'Invalid type for document parameter {name}, value: {param}, '
+                f'type: {param_type}, valid types: {valid_types}'
+            )
+        elif error_type == 'more than one input':
+            members = ', '.join(additional['members'])
+            return (
+                f'Invalid number of parameters set for tagged union structure '
+                f'{name}. Can only set one of the following keys: '
+                f'{members}.'
+            )
+        elif error_type == 'empty input':
+            members = ', '.join(additional['members'])
+            return (
+                f'Must set one of the following keys for tagged union '
+                f'structure {name}: {members}.'
+            )
+
+    def _get_name(self, name):
+        if not name:
+            return 'input'
+        elif name.startswith('.'):
+            return name[1:]
+        else:
+            return name
+
+    def report(self, name, reason, **kwargs):
+        self._errors.append((reason, name, kwargs))
+
+
+class ParamValidator:
+    """Validates parameters against a shape model."""
+
+    def validate(self, params, shape):
+        """Validate parameters against a shape model.
+
+        This method will validate the parameters against a provided shape model.
+        All errors will be collected before returning to the caller. This means
+        that this method will not stop at the first error; it will collect all
+        possible errors before returning.
+
+        :param params: User provided dict of parameters
+        :param shape: A shape model describing the expected input.
+
+        :return: A ValidationErrors object containing any errors found.
+
+        """
+        errors = ValidationErrors()
+        self._validate(params, shape, errors, name='')
+        return errors
+
+    def _check_special_validation_cases(self, shape):
+        if is_json_value_header(shape):
+            return self._validate_jsonvalue_string
+        if shape.type_name == 'structure' and shape.is_document_type:
+            return self._validate_document
+
+    def _validate(self, params, shape, errors, name):
+        special_validator = self._check_special_validation_cases(shape)
+        if special_validator:
+            special_validator(params, shape, errors, name)
+        else:
+            getattr(self, f'_validate_{shape.type_name}')(
+                params, shape, errors, name
+            )
+
+    def _validate_jsonvalue_string(self, params, shape, errors, name):
+        # Check to see if a value marked as a jsonvalue can be dumped to
+        # a json string.
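+        # For instance, {'a': 1} serializes cleanly, while a value such as
+        # a datetime would raise TypeError and be reported below.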
+ try: + json.dumps(params) + except (ValueError, TypeError) as e: + errors.report(name, 'unable to encode to json', type_error=e) + + def _validate_document(self, params, shape, errors, name): + if params is None: + return + + if isinstance(params, dict): + for key in params: + self._validate_document(params[key], shape, errors, key) + elif isinstance(params, list): + for index, entity in enumerate(params): + self._validate_document( + entity, shape, errors, f'{name}[{index}]' + ) + elif not isinstance(params, ((str,), int, bool, float)): + valid_types = (str, int, bool, float, list, dict) + valid_type_names = [str(t) for t in valid_types] + errors.report( + name, + 'invalid type for document', + param=params, + param_type=type(params), + valid_types=valid_type_names, + ) + + @type_check(valid_types=(dict,)) + def _validate_structure(self, params, shape, errors, name): + if shape.is_tagged_union: + if len(params) == 0: + errors.report(name, 'empty input', members=shape.members) + elif len(params) > 1: + errors.report( + name, 'more than one input', members=shape.members + ) + + # Validate required fields. + for required_member in shape.metadata.get('required', []): + if required_member not in params: + errors.report( + name, + 'missing required field', + required_name=required_member, + user_params=params, + ) + members = shape.members + known_params = [] + # Validate known params. + for param in params: + if param not in members: + errors.report( + name, + 'unknown field', + unknown_param=param, + valid_names=list(members), + ) + else: + known_params.append(param) + # Validate structure members. + for param in known_params: + self._validate( + params[param], + shape.members[param], + errors, + f'{name}.{param}', + ) + + @type_check(valid_types=(str,)) + def _validate_string(self, param, shape, errors, name): + # Validate range. For a string, the min/max constraints + # are of the string length. + # Looks like: + # "WorkflowId":{ + # "type":"string", + # "min":1, + # "max":256 + # } + range_check(name, len(param), shape, 'invalid length', errors) + + @type_check(valid_types=(list, tuple)) + def _validate_list(self, param, shape, errors, name): + member_shape = shape.member + range_check(name, len(param), shape, 'invalid length', errors) + for i, item in enumerate(param): + self._validate(item, member_shape, errors, f'{name}[{i}]') + + @type_check(valid_types=(dict,)) + def _validate_map(self, param, shape, errors, name): + key_shape = shape.key + value_shape = shape.value + for key, value in param.items(): + self._validate(key, key_shape, errors, f"{name} (key: {key})") + self._validate(value, value_shape, errors, f'{name}.{key}') + + @type_check(valid_types=(int,)) + def _validate_integer(self, param, shape, errors, name): + range_check(name, param, shape, 'invalid range', errors) + + def _validate_blob(self, param, shape, errors, name): + if isinstance(param, (bytes, bytearray, str)): + return + elif hasattr(param, 'read'): + # File like objects are also allowed for blob types. 
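+            # e.g. io.BytesIO objects and open file handles expose read()
+            # and therefore pass validation here.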
+ return + else: + errors.report( + name, + 'invalid type', + param=param, + valid_types=[str(bytes), str(bytearray), 'file-like object'], + ) + + @type_check(valid_types=(bool,)) + def _validate_boolean(self, param, shape, errors, name): + pass + + @type_check(valid_types=(float, decimal.Decimal) + (int,)) + def _validate_double(self, param, shape, errors, name): + range_check(name, param, shape, 'invalid range', errors) + + _validate_float = _validate_double + + @type_check(valid_types=(int,)) + def _validate_long(self, param, shape, errors, name): + range_check(name, param, shape, 'invalid range', errors) + + def _validate_timestamp(self, param, shape, errors, name): + # We don't use @type_check because datetimes are a bit + # more flexible. You can either provide a datetime + # object, or a string that parses to a datetime. + is_valid_type = self._type_check_datetime(param) + if not is_valid_type: + valid_type_names = [str(datetime), 'timestamp-string'] + errors.report( + name, 'invalid type', param=param, valid_types=valid_type_names + ) + + def _type_check_datetime(self, value): + try: + parse_to_aware_datetime(value) + return True + except (TypeError, ValueError, AttributeError): + # Yes, dateutil can sometimes raise an AttributeError + # when parsing timestamps. + return False + + +class ParamValidationDecorator: + def __init__(self, param_validator, serializer): + self._param_validator = param_validator + self._serializer = serializer + + def serialize_to_request(self, parameters, operation_model): + input_shape = operation_model.input_shape + if input_shape is not None: + report = self._param_validator.validate( + parameters, operation_model.input_shape + ) + if report.has_errors(): + raise ParamValidationError(report=report.generate_report()) + return self._serializer.serialize_to_request( + parameters, operation_model + ) diff --git a/py311/lib/python3.11/site-packages/botocore/waiter.py b/py311/lib/python3.11/site-packages/botocore/waiter.py new file mode 100644 index 0000000000000000000000000000000000000000..86110dd99e57994bd66774031031a94f2d391393 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore/waiter.py @@ -0,0 +1,396 @@ +# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import logging +import time +from functools import partial + +import jmespath + +from botocore.context import with_current_context +from botocore.docs.docstring import WaiterDocstring +from botocore.useragent import register_feature_id +from botocore.utils import get_service_module_name + +from . import xform_name +from .exceptions import ClientError, WaiterConfigError, WaiterError + +logger = logging.getLogger(__name__) + + +def create_waiter_with_client(waiter_name, waiter_model, client): + """ + + :type waiter_name: str + :param waiter_name: The name of the waiter. The name should match + the name (including the casing) of the key name in the waiter + model file (typically this is CamelCasing). 
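+        (For example, the S3 waiter model keys its bucket waiter as
+        ``BucketExists``.)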
+ + :type waiter_model: botocore.waiter.WaiterModel + :param waiter_model: The model for the waiter configuration. + + :type client: botocore.client.BaseClient + :param client: The botocore client associated with the service. + + :rtype: botocore.waiter.Waiter + :return: The waiter object. + + """ + single_waiter_config = waiter_model.get_waiter(waiter_name) + operation_name = xform_name(single_waiter_config.operation) + operation_method = NormalizedOperationMethod( + getattr(client, operation_name) + ) + + # Create a new wait method that will serve as a proxy to the underlying + # Waiter.wait method. This is needed to attach a docstring to the + # method. + def wait(self, **kwargs): + Waiter.wait(self, **kwargs) + + wait.__doc__ = WaiterDocstring( + waiter_name=waiter_name, + event_emitter=client.meta.events, + service_model=client.meta.service_model, + service_waiter_model=waiter_model, + include_signature=False, + ) + + # Rename the waiter class based on the type of waiter. + waiter_class_name = str( + f'{get_service_module_name(client.meta.service_model)}.Waiter.{waiter_name}' + ) + + # Create the new waiter class + documented_waiter_cls = type(waiter_class_name, (Waiter,), {'wait': wait}) + + # Return an instance of the new waiter class. + return documented_waiter_cls( + waiter_name, single_waiter_config, operation_method + ) + + +def is_valid_waiter_error(response): + error = response.get('Error') + if isinstance(error, dict) and 'Code' in error: + return True + return False + + +class NormalizedOperationMethod: + def __init__(self, client_method): + self._client_method = client_method + + def __call__(self, **kwargs): + try: + return self._client_method(**kwargs) + except ClientError as e: + return e.response + + +class WaiterModel: + SUPPORTED_VERSION = 2 + + def __init__(self, waiter_config): + """ + + Note that the WaiterModel takes ownership of the waiter_config. + It may or may not mutate the waiter_config. If this is a concern, + it is best to make a copy of the waiter config before passing it to + the WaiterModel. + + :type waiter_config: dict + :param waiter_config: The loaded waiter config + from the *.waiters.json file. This can be + obtained from a botocore Loader object as well. + + """ + self._waiter_config = waiter_config['waiters'] + + # These are part of the public API. Changing these + # will result in having to update the consuming code, + # so don't change unless you really need to. + version = waiter_config.get('version', 'unknown') + self._verify_supported_version(version) + self.version = version + self.waiter_names = list(sorted(waiter_config['waiters'].keys())) + + def _verify_supported_version(self, version): + if version != self.SUPPORTED_VERSION: + raise WaiterConfigError( + error_msg=( + "Unsupported waiter version, supported version " + f"must be: {self.SUPPORTED_VERSION}, but version " + f"of waiter config is: {version}" + ) + ) + + def get_waiter(self, waiter_name): + try: + single_waiter_config = self._waiter_config[waiter_name] + except KeyError: + raise ValueError(f"Waiter does not exist: {waiter_name}") + return SingleWaiterConfig(single_waiter_config) + + +class SingleWaiterConfig: + """Represents the waiter configuration for a single waiter. + + A single waiter is considered the configuration for a single + value associated with a named waiter (i.e TableExists). + + """ + + def __init__(self, single_waiter_config): + self._config = single_waiter_config + + # These attributes are part of the public API. 
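+        # For reference (illustrative, based on the fields read below), a
+        # single waiter config looks roughly like:
+        #   {"operation": "DescribeTable", "delay": 20, "maxAttempts": 25,
+        #    "acceptors": [{"state": "success", "matcher": "path", ...}]}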
+        self.description = single_waiter_config.get('description', '')
+        # Per the spec, these three fields are required.
+        self.operation = single_waiter_config['operation']
+        self.delay = single_waiter_config['delay']
+        self.max_attempts = single_waiter_config['maxAttempts']
+
+    @property
+    def acceptors(self):
+        acceptors = []
+        for acceptor_config in self._config['acceptors']:
+            acceptor = AcceptorConfig(acceptor_config)
+            acceptors.append(acceptor)
+        return acceptors
+
+
+class AcceptorConfig:
+    def __init__(self, config):
+        self.state = config['state']
+        self.matcher = config['matcher']
+        self.expected = config['expected']
+        self.argument = config.get('argument')
+        self.matcher_func = self._create_matcher_func()
+
+    @property
+    def explanation(self):
+        if self.matcher == 'path':
+            return f'For expression "{self.argument}" we matched expected path: "{self.expected}"'
+        elif self.matcher == 'pathAll':
+            return (
+                f'For expression "{self.argument}" all members matched '
+                f'expected path: "{self.expected}"'
+            )
+        elif self.matcher == 'pathAny':
+            return (
+                f'For expression "{self.argument}" we matched expected '
+                f'path: "{self.expected}" at least once'
+            )
+        elif self.matcher == 'status':
+            return f'Matched expected HTTP status code: {self.expected}'
+        elif self.matcher == 'error':
+            return f'Matched expected service error code: {self.expected}'
+        else:
+            return f'No explanation for unknown waiter type: "{self.matcher}"'
+
+    def _create_matcher_func(self):
+        # An acceptor function is a callable that takes a single value: the
+        # parsed AWS response. Note that the parsed error response is also
+        # provided in the case of errors, so it's entirely possible to
+        # handle all the available matcher capabilities in the future.
+        # There are only a handful of supported matchers, so for now this
+        # is all contained in a single method. If this grows, we can expand
+        # it out to separate methods or even objects.
+
+        if self.matcher == 'path':
+            return self._create_path_matcher()
+        elif self.matcher == 'pathAll':
+            return self._create_path_all_matcher()
+        elif self.matcher == 'pathAny':
+            return self._create_path_any_matcher()
+        elif self.matcher == 'status':
+            return self._create_status_matcher()
+        elif self.matcher == 'error':
+            return self._create_error_matcher()
+        else:
+            raise WaiterConfigError(
+                error_msg=f"Unknown acceptor: {self.matcher}"
+            )
+
+    def _create_path_matcher(self):
+        expression = jmespath.compile(self.argument)
+        expected = self.expected
+
+        def acceptor_matches(response):
+            if is_valid_waiter_error(response):
+                return
+            return expression.search(response) == expected
+
+        return acceptor_matches
+
+    def _create_path_all_matcher(self):
+        expression = jmespath.compile(self.argument)
+        expected = self.expected
+
+        def acceptor_matches(response):
+            if is_valid_waiter_error(response):
+                return
+            result = expression.search(response)
+            if not isinstance(result, list) or not result:
+                # pathAll matcher must result in a list.
+                # Also we require at least one element in the list,
+                # that is, an empty list should not result in this
+                # acceptor match.
+                return False
+            for element in result:
+                if element != expected:
+                    return False
+            return True
+
+        return acceptor_matches
+
+    def _create_path_any_matcher(self):
+        expression = jmespath.compile(self.argument)
+        expected = self.expected
+
+        def acceptor_matches(response):
+            if is_valid_waiter_error(response):
+                return
+            result = expression.search(response)
+            if not isinstance(result, list) or not result:
+                # pathAny matcher must result in a list.
+ # Also we require at least one element in the list, + # that is, an empty list should not result in this + # acceptor match. + return False + for element in result: + if element == expected: + return True + return False + + return acceptor_matches + + def _create_status_matcher(self): + expected = self.expected + + def acceptor_matches(response): + # We don't have any requirements on the expected incoming data + # other than it is a dict, so we don't assume there's + # a ResponseMetadata.HTTPStatusCode. + status_code = response.get('ResponseMetadata', {}).get( + 'HTTPStatusCode' + ) + return status_code == expected + + return acceptor_matches + + def _create_error_matcher(self): + expected = self.expected + + def acceptor_matches(response): + # When the client encounters an error, it will normally raise + # an exception. However, the waiter implementation will catch + # this exception, and instead send us the parsed error + # response. So response is still a dictionary, and in the case + # of an error response will contain the "Error" and + # "ResponseMetadata" key. + # When expected is True, accept any error code. + # When expected is False, check if any errors were encountered. + # Otherwise, check for a specific AWS error code. + if expected is True: + return "Error" in response and "Code" in response["Error"] + elif expected is False: + return "Error" not in response + else: + return response.get("Error", {}).get("Code", "") == expected + + return acceptor_matches + + +class Waiter: + def __init__(self, name, config, operation_method): + """ + + :type name: string + :param name: The name of the waiter + + :type config: botocore.waiter.SingleWaiterConfig + :param config: The configuration for the waiter. + + :type operation_method: callable + :param operation_method: A callable that accepts **kwargs + and returns a response. For example, this can be + a method from a botocore client. + + """ + self._operation_method = operation_method + # The two attributes are exposed to allow for introspection + # and documentation. + self.name = name + self.config = config + + @with_current_context(partial(register_feature_id, 'WAITER')) + def wait(self, **kwargs): + acceptors = list(self.config.acceptors) + current_state = 'waiting' + # pop the invocation specific config + config = kwargs.pop('WaiterConfig', {}) + sleep_amount = config.get('Delay', self.config.delay) + max_attempts = config.get('MaxAttempts', self.config.max_attempts) + last_matched_acceptor = None + num_attempts = 0 + + while True: + response = self._operation_method(**kwargs) + num_attempts += 1 + for acceptor in acceptors: + if acceptor.matcher_func(response): + last_matched_acceptor = acceptor + current_state = acceptor.state + break + else: + # If none of the acceptors matched, we should + # transition to the failure state if an error + # response was received. + if is_valid_waiter_error(response): + # Transition to a failure state, which we + # can just handle here by raising an exception. + raise WaiterError( + name=self.name, + reason='An error occurred ({}): {}'.format( + response['Error'].get('Code', 'Unknown'), + response['Error'].get('Message', 'Unknown'), + ), + last_response=response, + ) + if current_state == 'success': + logger.debug( + "Waiting complete, waiter matched the success state." 
+ ) + return + if current_state == 'failure': + reason = f'Waiter encountered a terminal failure state: {acceptor.explanation}' + raise WaiterError( + name=self.name, + reason=reason, + last_response=response, + ) + if num_attempts >= max_attempts: + if last_matched_acceptor is None: + reason = 'Max attempts exceeded' + else: + reason = ( + f'Max attempts exceeded. Previously accepted state: ' + f'{acceptor.explanation}' + ) + raise WaiterError( + name=self.name, + reason=reason, + last_response=response, + ) + time.sleep(sleep_amount) diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/LICENSE.txt b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..c31ac56d772f7061c8b42f0ee96063b7a94a4859 --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/LICENSE.txt @@ -0,0 +1,284 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? 
+
+    0.9.0 thru 1.2              1991-1995   CWI         yes
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
+    1.6             1.5.2       2000        CNRI        no
+    2.0             1.6         2000        BeOpen.com  no
+    1.6.1           1.6         2001        CNRI        yes (2)
+    2.1             2.0+1.6.1   2001        PSF         no
+    2.0.1           2.0+1.6.1   2001        PSF         yes
+    2.1.1           2.1+2.0.1   2001        PSF         yes
+    2.2             2.1.1       2001        PSF         yes
+    2.1.2           2.1.1       2002        PSF         yes
+    2.1.3           2.1.2       2002        PSF         yes
+    2.2.1           2.2         2002        PSF         yes
+    2.2.2           2.2.1       2002        PSF         yes
+    2.2.3           2.2.2       2003        PSF         yes
+    2.3             2.2.2       2002-2003   PSF         yes
+    2.3.1           2.3         2002-2003   PSF         yes
+    2.3.2           2.3.1       2002-2003   PSF         yes
+    2.3.3           2.3.2       2002-2003   PSF         yes
+    2.3.4           2.3.3       2004        PSF         yes
+    2.3.5           2.3.4       2005        PSF         yes
+    2.4             2.3         2004        PSF         yes
+    2.4.1           2.4         2005        PSF         yes
+    2.4.2           2.4.1       2005        PSF         yes
+    2.4.3           2.4.2       2006        PSF         yes
+    2.4.4           2.4.3       2006        PSF         yes
+    2.5             2.4         2006        PSF         yes
+    2.5.1           2.5         2007        PSF         yes
+    2.5.2           2.5.1       2008        PSF         yes
+    2.5.3           2.5.2       2008        PSF         yes
+    2.6             2.5         2008        PSF         yes
+    2.6.1           2.6         2008        PSF         yes
+    2.6.2           2.6.1       2009        PSF         yes
+    2.6.3           2.6.2       2009        PSF         yes
+    2.6.4           2.6.3       2009        PSF         yes
+    2.6.5           2.6.4       2010        PSF         yes
+    3.0             2.6         2008        PSF         yes
+    3.0.1           3.0         2009        PSF         yes
+    3.1             3.0.1       2009        PSF         yes
+    3.1.1           3.1         2009        PSF         yes
+    3.1.2           3.1         2010        PSF         yes
+    3.2             3.1         2010        PSF         yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+    the GPL. All Python licenses, unlike the GPL, let you distribute
+    a modified version without making your changes open source. The
+    GPL-compatible licenses make it possible to combine Python with
+    other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+    because its license has a choice of law clause. According to
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+    is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+Python Software Foundation; All Rights Reserved" are retained in Python alone or
+in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. 
By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. 
Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5735826f85e61ef913bee63a217ed0cebaf9cf32 --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/METADATA @@ -0,0 +1,118 @@ +Metadata-Version: 2.1 +Name: distlib +Version: 0.4.0 +Summary: Distribution utilities +Home-page: https://github.com/pypa/distlib +Author: Vinay Sajip +Author-email: vinay_sajip@red-dove.com +License: PSF-2.0 +Project-URL: Documentation, https://distlib.readthedocs.io/ +Project-URL: Source, https://github.com/pypa/distlib +Project-URL: Tracker, https://github.com/pypa/distlib/issues +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Software Development +License-File: LICENSE.txt + +|badge1| |badge2| + +.. 
|badge1| image:: https://img.shields.io/github/actions/workflow/status/pypa/distlib/package-tests.yml
+   :alt: GitHub Workflow Status (with event)
+
+.. |badge2| image:: https://img.shields.io/codecov/c/github/pypa/distlib
+   :target: https://app.codecov.io/gh/pypa/distlib
+   :alt: GitHub coverage status
+
+What is it?
+-----------
+
+Distlib is a library which implements low-level functions that relate to
+packaging and distribution of Python software. It is intended to be used as the
+basis for third-party packaging tools. The documentation is available at
+
+https://distlib.readthedocs.io/
+
+Main features
+-------------
+
+Distlib currently offers the following features:
+
+* The package ``distlib.database``, which implements a database of installed
+  distributions, as defined by :pep:`376`, and distribution dependency graph
+  logic. Support is also provided for non-installed distributions (i.e.
+  distributions registered with metadata on an index like PyPI), including
+  the ability to scan for dependencies and building dependency graphs.
+* The package ``distlib.index``, which implements an interface to perform
+  operations on an index, such as registering a project, uploading a
+  distribution or uploading documentation. Support is included for verifying
+  SSL connections (with domain matching) and signing/verifying packages using
+  GnuPG.
+* The package ``distlib.metadata``, which implements distribution metadata as
+  defined by :pep:`643`, :pep:`566`, :pep:`345`, :pep:`314` and :pep:`241`.
+* The package ``distlib.markers``, which implements environment markers as
+  defined by :pep:`508`.
+* The package ``distlib.manifest``, which implements lists of files used
+  in packaging source distributions.
+* The package ``distlib.locators``, which allows finding distributions, whether
+  on PyPI (XML-RPC or via the "simple" interface), local directories or some
+  other source.
+* The package ``distlib.resources``, which allows access to data files stored
+  in Python packages, both in the file system and in .zip files.
+* The package ``distlib.scripts``, which allows installing of scripts with
+  adjustment of shebang lines and support for native Windows executable
+  launchers.
+* The package ``distlib.version``, which implements version specifiers as
+  defined by :pep:`440`, but also support for working with "legacy" versions and
+  semantic versions.
+* The package ``distlib.wheel``, which provides support for building and
+  installing from the Wheel format for binary distributions (see :pep:`427`).
+* The package ``distlib.util``, which contains miscellaneous functions and
+  classes which are useful in packaging, but which do not fit neatly into
+  one of the other packages in ``distlib``. The package implements enhanced
+  globbing functionality such as the ability to use ``**`` in patterns to
+  specify recursing into subdirectories.
+
+
+Python version and platform compatibility
+-----------------------------------------
+
+Distlib is intended to be used on, and is tested on, Python versions 2.7 and
+3.6 or later, as well as pypy-2.7 and pypy3, on Linux, Windows, and macOS.
+
+Project status
+--------------
+
+The project has reached a mature status in its development: there is a
+comprehensive test suite and it has been exercised on Windows, Ubuntu and
+macOS. The project is used by well-known projects such as pip and
+caniusepython3.
+ +This project was migrated from Mercurial to Git and from BitBucket to GitHub, and +although all information of importance has been retained across the migration, some +commit references in issues and issue comments may have become invalid. + +Code of Conduct +--------------- + +Everyone interacting in the distlib project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d00eb42cd08fa1c33bb70f3f0887eda9a9536d03 --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/RECORD @@ -0,0 +1,26 @@ +distlib-0.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +distlib-0.4.0.dist-info/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 +distlib-0.4.0.dist-info/METADATA,sha256=jDrj_J-v8TcG0zV-w3UHd_DFAFSrwCbJwkzliC7x3ec,5246 +distlib-0.4.0.dist-info/RECORD,, +distlib-0.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +distlib-0.4.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +distlib-0.4.0.dist-info/top_level.txt,sha256=9BERqitu_vzyeyILOcGzX9YyA2AB_xlC4-81V6xoizk,8 +distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625 +distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 +distlib/database.py,sha256=mHy_LxiXIsIVRb-T0-idBrVLw3Ffij5teHCpbjmJ9YU,51160 +distlib/index.py,sha256=lTbw268rRhj8dw1sib3VZ_0EhSGgoJO3FKJzSFMOaeA,20797 +distlib/locators.py,sha256=oBeAZpFuPQSY09MgNnLfQGGAXXvVO96BFpZyKMuK4tM,51026 +distlib/manifest.py,sha256=3qfmAmVwxRqU1o23AlfXrQGZzh6g_GGzTAP_Hb9C5zQ,14168 +distlib/markers.py,sha256=HsgcqMkOIhKJE6vOu8j9m-SC_Np4iOtj2GXkZ0VSmBk,5269 +distlib/metadata.py,sha256=zil3sg2EUfLXVigljY2d_03IJt-JSs7nX-73fECMX2s,38724 +distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612 +distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 +distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 +distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 +distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 +distlib/version.py,sha256=s5VIs8wBn0fxzGxWM_aA2ZZyx525HcZbMvcTlTyZ3Rg,23727 +distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 +distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 +distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 +distlib/wheel.py,sha256=5hociKVB2EfoUqayljOI_3EIu-RPNJgPG1hsiWEDxJc,44271 diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..0b18a281107a0448a9980396d9d324ea2aa7a7f8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: 
bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..f68bb07272ddc517a1ca4c3d4e50ac67bf78d3e2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/distlib-0.4.0.dist-info/top_level.txt @@ -0,0 +1 @@ +distlib diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__init__.py b/py311/lib/python3.11/site-packages/docstring_parser/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d80ca29fba2334cb3978686f55037900aeeb19a3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/__init__.py @@ -0,0 +1,34 @@ +"""Parse docstrings as per Sphinx notation.""" + +from .common import ( + Docstring, + DocstringDeprecated, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) +from .parser import compose, parse, parse_from_object +from .util import combine_docstrings + +Style = DocstringStyle # backwards compatibility + +__all__ = [ + "parse", + "parse_from_object", + "combine_docstrings", + "compose", + "ParseError", + "Docstring", + "DocstringMeta", + "DocstringParam", + "DocstringRaises", + "DocstringReturns", + "DocstringDeprecated", + "DocstringStyle", + "RenderingStyle", + "Style", +] diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/__init__.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dc2244873e3195414740862656ec801fde51cf7 Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/__init__.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/attrdoc.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/attrdoc.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69359452536f8722ef65a958e62610e8d66be2b7 Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/attrdoc.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/common.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/common.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af0b10a5ba351553cb3b12c21404e9551c84c805 Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/common.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/epydoc.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/epydoc.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ccfb7eb6a6900eada5eb811ce0b3de440cd83e2 Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/epydoc.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/google.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/google.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb0512c05aaee2e96626ed253d607f12937f41c4 Binary files /dev/null and 
b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/google.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/numpydoc.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/numpydoc.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f2767f41ada92778b3dff2fdb5be7dac4ee882e Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/numpydoc.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/parser.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67a1e1798d6c1c7703be99e807a03970b2f8747c Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/parser.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/rest.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/rest.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0668113e224db8c197325084b125fd6866683ebe Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/rest.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/util.cpython-311.pyc b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/util.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0649b1b4b3512a6218da8e076f1bb3002e7bc14e Binary files /dev/null and b/py311/lib/python3.11/site-packages/docstring_parser/__pycache__/util.cpython-311.pyc differ diff --git a/py311/lib/python3.11/site-packages/docstring_parser/attrdoc.py b/py311/lib/python3.11/site-packages/docstring_parser/attrdoc.py new file mode 100644 index 0000000000000000000000000000000000000000..de84a85d11747bb90d0650f041292d2d1f099e92 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/attrdoc.py @@ -0,0 +1,126 @@ +"""Attribute docstrings parsing. + +.. 
seealso:: https://peps.python.org/pep-0257/#what-is-a-docstring +""" + +import ast +import inspect +import textwrap +import typing as T +from types import ModuleType + +from .common import Docstring, DocstringParam + + +def ast_get_constant_value(node: ast.AST) -> T.Any: + """Return the constant's value if the given node is a constant.""" + return getattr(node, "value") + + +def ast_unparse(node: ast.AST) -> T.Optional[str]: + """Convert the AST node to source code as a string.""" + if hasattr(ast, "unparse"): + return ast.unparse(node) + # Support simple cases in Python < 3.9 + if isinstance(node, ast.Constant): + return str(ast_get_constant_value(node)) + if isinstance(node, ast.Name): + return node.id + return None + + +def ast_is_literal_str(node: ast.AST) -> bool: + """Return True if the given node is a literal string.""" + return ( + isinstance(node, ast.Expr) + and isinstance(node.value, ast.Constant) + and isinstance(ast_get_constant_value(node.value), str) + ) + + +def ast_get_attribute( + node: ast.AST, +) -> T.Optional[T.Tuple[str, T.Optional[str], T.Optional[str]]]: + """Return name, type and default if the given node is an attribute.""" + if isinstance(node, (ast.Assign, ast.AnnAssign)): + target = ( + node.targets[0] if isinstance(node, ast.Assign) else node.target + ) + if isinstance(target, ast.Name): + type_str = None + if isinstance(node, ast.AnnAssign): + type_str = ast_unparse(node.annotation) + default = None + if node.value: + default = ast_unparse(node.value) + return target.id, type_str, default + return None + + +class AttributeDocstrings(ast.NodeVisitor): + """An ast.NodeVisitor that collects attribute docstrings.""" + + attr_docs = None + prev_attr = None + + def visit(self, node): + if self.prev_attr and ast_is_literal_str(node): + attr_name, attr_type, attr_default = self.prev_attr + self.attr_docs[attr_name] = ( + ast_get_constant_value(node.value), + attr_type, + attr_default, + ) + self.prev_attr = ast_get_attribute(node) + if isinstance(node, (ast.ClassDef, ast.Module)): + self.generic_visit(node) + + def get_attr_docs( + self, component: T.Any + ) -> T.Dict[str, T.Tuple[str, T.Optional[str], T.Optional[str]]]: + """Get attribute docstrings from the given component. + + :param component: component to process (class or module) + :returns: for each attribute docstring, a tuple with (description, + type, default) + """ + self.attr_docs = {} + self.prev_attr = None + try: + source = textwrap.dedent(inspect.getsource(component)) + except OSError: + pass + else: + tree = ast.parse(source) + if inspect.ismodule(component): + self.visit(tree) + elif isinstance(tree, ast.Module) and isinstance( + tree.body[0], ast.ClassDef + ): + self.visit(tree.body[0]) + return self.attr_docs + + +def add_attribute_docstrings( + obj: T.Union[type, ModuleType], docstring: Docstring +) -> None: + """Add attribute docstrings found in the object's source code. 
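+
+    An attribute docstring is a string literal placed on the line directly
+    after an attribute assignment, as collected by ``AttributeDocstrings``
+    above.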
+ + :param obj: object from which to parse attribute docstrings + :param docstring: Docstring object where found attributes are added + :returns: list with names of added attributes + """ + params = set(p.arg_name for p in docstring.params) + for arg_name, (description, type_name, default) in ( + AttributeDocstrings().get_attr_docs(obj).items() + ): + if arg_name not in params: + param = DocstringParam( + args=["attribute", arg_name], + description=description, + arg_name=arg_name, + type_name=type_name, + is_optional=default is not None, + default=default, + ) + docstring.meta.append(param) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/common.py b/py311/lib/python3.11/site-packages/docstring_parser/common.py new file mode 100644 index 0000000000000000000000000000000000000000..880e37864d59e460e52e5174a6d371c81ea54617 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/common.py @@ -0,0 +1,229 @@ +"""Common methods for parsing.""" + +import enum +import typing as T + +PARAM_KEYWORDS = { + "param", + "parameter", + "arg", + "argument", + "attribute", + "key", + "keyword", +} +RAISES_KEYWORDS = {"raises", "raise", "except", "exception"} +DEPRECATION_KEYWORDS = {"deprecation", "deprecated"} +RETURNS_KEYWORDS = {"return", "returns"} +YIELDS_KEYWORDS = {"yield", "yields"} +EXAMPLES_KEYWORDS = {"example", "examples"} + + +class ParseError(RuntimeError): + """Base class for all parsing related errors.""" + + +class DocstringStyle(enum.Enum): + """Docstring style.""" + + REST = 1 + GOOGLE = 2 + NUMPYDOC = 3 + EPYDOC = 4 + AUTO = 255 + + +class RenderingStyle(enum.Enum): + """Rendering style when unparsing parsed docstrings.""" + + COMPACT = 1 + CLEAN = 2 + EXPANDED = 3 + + +class DocstringMeta: + """Docstring meta information. + + Symbolizes lines in form of + + :param arg: description + :raises ValueError: if something happens + """ + + def __init__( + self, args: T.List[str], description: T.Optional[str] + ) -> None: + """Initialize self. + + :param args: list of arguments. The exact content of this variable is + dependent on the kind of docstring; it's used to distinguish + between custom docstring meta information items. + :param description: associated docstring description. 
+ """ + self.args = args + self.description = description + + +class DocstringParam(DocstringMeta): + """DocstringMeta symbolizing :param metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + arg_name: str, + type_name: T.Optional[str], + is_optional: T.Optional[bool], + default: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.arg_name = arg_name + self.type_name = type_name + self.is_optional = is_optional + self.default = default + + +class DocstringReturns(DocstringMeta): + """DocstringMeta symbolizing :returns or :yields metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + type_name: T.Optional[str], + is_generator: bool, + return_name: T.Optional[str] = None, + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.type_name = type_name + self.is_generator = is_generator + self.return_name = return_name + + +class DocstringRaises(DocstringMeta): + """DocstringMeta symbolizing :raises metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + type_name: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.type_name = type_name + self.description = description + + +class DocstringDeprecated(DocstringMeta): + """DocstringMeta symbolizing deprecation metadata.""" + + def __init__( + self, + args: T.List[str], + description: T.Optional[str], + version: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.version = version + self.description = description + + +class DocstringExample(DocstringMeta): + """DocstringMeta symbolizing example metadata.""" + + def __init__( + self, + args: T.List[str], + snippet: T.Optional[str], + description: T.Optional[str], + ) -> None: + """Initialize self.""" + super().__init__(args, description) + self.snippet = snippet + self.description = description + + +class Docstring: + """Docstring object representation.""" + + def __init__( + self, + style=None, # type: T.Optional[DocstringStyle] + ) -> None: + """Initialize self.""" + self.short_description = None # type: T.Optional[str] + self.long_description = None # type: T.Optional[str] + self.blank_after_short_description = False + self.blank_after_long_description = False + self.meta = [] # type: T.List[DocstringMeta] + self.style = style # type: T.Optional[DocstringStyle] + + @property + def description(self) -> T.Optional[str]: + """Return the full description of the function + + Returns None if the docstring did not include any description + """ + ret = [] + if self.short_description: + ret.append(self.short_description) + if self.blank_after_short_description: + ret.append("") + if self.long_description: + ret.append(self.long_description) + + if not ret: + return None + + return "\n".join(ret) + + @property + def params(self) -> T.List[DocstringParam]: + """Return a list of information on function params.""" + return [item for item in self.meta if isinstance(item, DocstringParam)] + + @property + def raises(self) -> T.List[DocstringRaises]: + """Return a list of information on the exceptions that the function + may raise. + """ + return [ + item for item in self.meta if isinstance(item, DocstringRaises) + ] + + @property + def returns(self) -> T.Optional[DocstringReturns]: + """Return a single information on function return. + + Takes the first return information. 
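+
+        Use :attr:`many_returns` to retrieve all of them.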
+        """
+        for item in self.meta:
+            if isinstance(item, DocstringReturns):
+                return item
+        return None
+
+    @property
+    def many_returns(self) -> T.List[DocstringReturns]:
+        """Return a list of information on function return."""
+        return [
+            item for item in self.meta if isinstance(item, DocstringReturns)
+        ]
+
+    @property
+    def deprecation(self) -> T.Optional[DocstringDeprecated]:
+        """Return a single information on function deprecation notes."""
+        for item in self.meta:
+            if isinstance(item, DocstringDeprecated):
+                return item
+        return None
+
+    @property
+    def examples(self) -> T.List[DocstringExample]:
+        """Return a list of information on function examples."""
+        return [
+            item for item in self.meta if isinstance(item, DocstringExample)
+        ]
diff --git a/py311/lib/python3.11/site-packages/docstring_parser/epydoc.py b/py311/lib/python3.11/site-packages/docstring_parser/epydoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d4dde67d308f390a1ac973b77f89c285e6119ad
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/docstring_parser/epydoc.py
@@ -0,0 +1,269 @@
+"""Epydoc-style docstring parsing.
+
+.. seealso:: http://epydoc.sourceforge.net/manual-fields.html
+"""
+
+import inspect
+import re
+import typing as T
+
+from .common import (
+    Docstring,
+    DocstringMeta,
+    DocstringParam,
+    DocstringRaises,
+    DocstringReturns,
+    DocstringStyle,
+    ParseError,
+    RenderingStyle,
+)
+
+
+def _clean_str(string: str) -> T.Optional[str]:
+    string = string.strip()
+    if len(string) > 0:
+        return string
+    return None
+
+
+def parse(text: str) -> Docstring:
+    """Parse the epydoc-style docstring into its components.
+
+    :returns: parsed docstring
+    """
+    ret = Docstring(style=DocstringStyle.EPYDOC)
+    if not text:
+        return ret
+
+    text = inspect.cleandoc(text)
+    match = re.search("^@", text, flags=re.M)
+    if match:
+        desc_chunk = text[: match.start()]
+        meta_chunk = text[match.start() :]
+    else:
+        desc_chunk = text
+        meta_chunk = ""
+
+    parts = desc_chunk.split("\n", 1)
+    ret.short_description = parts[0] or None
+    if len(parts) > 1:
+        long_desc_chunk = parts[1] or ""
+        ret.blank_after_short_description = long_desc_chunk.startswith("\n")
+        ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
+        ret.long_description = long_desc_chunk.strip() or None
+
+    param_pattern = re.compile(
+        r"(param|keyword|type)(\s+[_A-z][_A-z0-9]*\??):"
+    )
+    raise_pattern = re.compile(r"(raise)(\s+[_A-z][_A-z0-9]*\??)?:")
+    return_pattern = re.compile(r"(return|rtype|yield|ytype):")
+    meta_pattern = re.compile(
+        r"([_A-z][_A-z0-9]+)((\s+[_A-z][_A-z0-9]*\??)*):"
+    )
+
+    # tokenize
+    stream: T.List[T.Tuple[str, str, T.List[str], str]] = []
+    for match in re.finditer(
+        r"(^@.*?)(?=^@|\Z)", meta_chunk, flags=re.S | re.M
+    ):
+        chunk = match.group(0)
+        if not chunk:
+            continue
+
+        param_match = re.search(param_pattern, chunk)
+        raise_match = re.search(raise_pattern, chunk)
+        return_match = re.search(return_pattern, chunk)
+        meta_match = re.search(meta_pattern, chunk)
+
+        match = param_match or raise_match or return_match or meta_match
+        if not match:
+            raise ParseError(f'Error parsing meta information near "{chunk}".')
+
+        desc_chunk = chunk[match.end() :]
+        if param_match:
+            base = "param"
+            key: str = match.group(1)
+            args = [match.group(2).strip()]
+        elif raise_match:
+            base = "raise"
+            key: str = match.group(1)
+            args = [] if match.group(2) is None else [match.group(2).strip()]
+        elif return_match:
+            base = "return"
+            key: str = match.group(1)
+            args = []
+        else:
+            base = "meta"
+            key: str =
match.group(1) + token = _clean_str(match.group(2).strip()) + args = [] if token is None else re.split(r"\s+", token) + + # Make sure we didn't match some existing keyword in an incorrect + # way here: + if key in [ + "param", + "keyword", + "type", + "return", + "rtype", + "yield", + "ytype", + ]: + raise ParseError( + f'Error parsing meta information near "{chunk}".' + ) + + desc = desc_chunk.strip() + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + stream.append((base, key, args, desc)) + + # Combine type_name, arg_name, and description information + params: T.Dict[str, T.Dict[str, T.Any]] = {} + for base, key, args, desc in stream: + if base not in ["param", "return"]: + continue # nothing to do + + (arg_name,) = args or ("return",) + info = params.setdefault(arg_name, {}) + info_key = "type_name" if "type" in key else "description" + info[info_key] = desc + + if base == "return": + is_generator = key in {"ytype", "yield"} + if info.setdefault("is_generator", is_generator) != is_generator: + raise ParseError( + f'Error parsing meta information for "{arg_name}".' + ) + + is_done: T.Dict[str, bool] = {} + for base, key, args, desc in stream: + if base == "param" and not is_done.get(args[0], False): + (arg_name,) = args + info = params[arg_name] + type_name = info.get("type_name") + + if type_name and type_name.endswith("?"): + is_optional = True + type_name = type_name[:-1] + else: + is_optional = False + + match = re.match(r".*defaults to (.+)", desc, flags=re.DOTALL) + default = match.group(1).rstrip(".") if match else None + + meta_item = DocstringParam( + args=[key, arg_name], + description=info.get("description"), + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + is_done[arg_name] = True + elif base == "return" and not is_done.get("return", False): + info = params["return"] + meta_item = DocstringReturns( + args=[key], + description=info.get("description"), + type_name=info.get("type_name"), + is_generator=info.get("is_generator", False), + ) + is_done["return"] = True + elif base == "raise": + (type_name,) = args or (None,) + meta_item = DocstringRaises( + args=[key] + args, + description=desc, + type_name=type_name, + ) + elif base == "meta": + meta_item = DocstringMeta( + args=[key] + args, + description=desc, + ) + else: + (key, *_) = args or ("return",) + assert is_done.get(key, False) + continue # don't append + + ret.meta.append(meta_item) + + return ret + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. 
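+
+    Roughly the inverse of :func:`parse`; for example (illustrative),
+    ``compose(parse("@param x: an int"))`` gives back
+    ``"@param x: an int"`` under the default compact rendering style.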
+ + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_desc(desc: T.Optional[str], is_type: bool) -> str: + if not desc: + return "" + + if rendering_style == RenderingStyle.EXPANDED or ( + rendering_style == RenderingStyle.CLEAN and not is_type + ): + (first, *rest) = desc.splitlines() + return "\n".join( + ["\n" + indent + first] + [indent + line for line in rest] + ) + + (first, *rest) = desc.splitlines() + return "\n".join([" " + first] + [indent + line for line in rest]) + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + for meta in docstring.meta: + if isinstance(meta, DocstringParam): + if meta.type_name: + type_name = ( + f"{meta.type_name}?" + if meta.is_optional + else meta.type_name + ) + text = f"@type {meta.arg_name}:" + text += process_desc(type_name, True) + parts.append(text) + text = f"@param {meta.arg_name}:" + process_desc( + meta.description, False + ) + parts.append(text) + elif isinstance(meta, DocstringReturns): + (arg_key, type_key) = ( + ("yield", "ytype") + if meta.is_generator + else ("return", "rtype") + ) + if meta.type_name: + text = f"@{type_key}:" + process_desc(meta.type_name, True) + parts.append(text) + if meta.description: + text = f"@{arg_key}:" + process_desc(meta.description, False) + parts.append(text) + elif isinstance(meta, DocstringRaises): + text = f"@raise {meta.type_name}:" if meta.type_name else "@raise:" + text += process_desc(meta.description, False) + parts.append(text) + else: + text = f'@{" ".join(meta.args)}:' + text += process_desc(meta.description, False) + parts.append(text) + return "\n".join(parts) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/google.py b/py311/lib/python3.11/site-packages/docstring_parser/google.py new file mode 100644 index 0000000000000000000000000000000000000000..aa4284e28a4a43758c7b6a650d38749dbedddf2a --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/google.py @@ -0,0 +1,414 @@ +"""Google-style docstring parsing.""" + +import inspect +import re +import typing as T +from collections import OrderedDict, namedtuple +from enum import IntEnum + +from .common import ( + EXAMPLES_KEYWORDS, + PARAM_KEYWORDS, + RAISES_KEYWORDS, + RETURNS_KEYWORDS, + YIELDS_KEYWORDS, + Docstring, + DocstringExample, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) + + +class SectionType(IntEnum): + """Types of sections.""" + + SINGULAR = 0 + """For sections like examples.""" + + MULTIPLE = 1 + """For sections like params.""" + + SINGULAR_OR_MULTIPLE = 2 + """For sections like returns or yields.""" + + +class Section(namedtuple("SectionBase", "title key type")): + """A docstring section.""" + + +GOOGLE_TYPED_ARG_REGEX = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)") +GOOGLE_ARG_DESC_REGEX = re.compile(r".*\. 
Defaults to (.+)\.") +MULTIPLE_PATTERN = re.compile(r"(\s*[^:\s]+:)|([^:]*\]:.*)") + +DEFAULT_SECTIONS = [ + Section("Arguments", "param", SectionType.MULTIPLE), + Section("Args", "param", SectionType.MULTIPLE), + Section("Parameters", "param", SectionType.MULTIPLE), + Section("Params", "param", SectionType.MULTIPLE), + Section("Raises", "raises", SectionType.MULTIPLE), + Section("Exceptions", "raises", SectionType.MULTIPLE), + Section("Except", "raises", SectionType.MULTIPLE), + Section("Attributes", "attribute", SectionType.MULTIPLE), + Section("Example", "examples", SectionType.SINGULAR), + Section("Examples", "examples", SectionType.SINGULAR), + Section("Returns", "returns", SectionType.SINGULAR_OR_MULTIPLE), + Section("Yields", "yields", SectionType.SINGULAR_OR_MULTIPLE), +] + + +class GoogleParser: + """Parser for Google-style docstrings.""" + + def __init__( + self, sections: T.Optional[T.List[Section]] = None, title_colon=True + ): + """Setup sections. + + :param sections: Recognized sections or None to use the defaults. + :param title_colon: require colon after section title. + """ + if not sections: + sections = DEFAULT_SECTIONS + self.sections = {s.title: s for s in sections} + self.title_colon = title_colon + self._setup() + + def _setup(self): + if self.title_colon: + colon = ":" + else: + colon = "" + self.titles_re = re.compile( + "^(" + + "|".join(f"({t})" for t in self.sections) + + ")" + + colon + + "[ \t\r\f\v]*$", + flags=re.M, + ) + + def _build_meta(self, text: str, title: str) -> DocstringMeta: + """Build docstring element. + + :param text: docstring element text + :param title: title of section containing element + :return: + """ + + section = self.sections[title] + + if ( + section.type == SectionType.SINGULAR_OR_MULTIPLE + and not MULTIPLE_PATTERN.match(text) + ) or section.type == SectionType.SINGULAR: + return self._build_single_meta(section, text) + + if ":" not in text: + raise ParseError(f"Expected a colon in {text!r}.") + + # Split spec and description + before, desc = text.split(":", 1) + + if before and "\n" in before: + # If the spec before the colon spans multiple lines, clean it up + first_line, rest = before.split("\n", 1) + before = first_line + inspect.cleandoc(rest) + + if desc: + desc = desc[1:] if desc[0] == " " else desc + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + desc = desc.strip("\n") + + return self._build_multi_meta(section, before, desc) + + @staticmethod + def _build_single_meta(section: Section, desc: str) -> DocstringMeta: + if section.key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + return DocstringReturns( + args=[section.key], + description=desc, + type_name=None, + is_generator=section.key in YIELDS_KEYWORDS, + ) + if section.key in RAISES_KEYWORDS: + return DocstringRaises( + args=[section.key], description=desc, type_name=None + ) + if section.key in EXAMPLES_KEYWORDS: + return DocstringExample( + args=[section.key], snippet=None, description=desc + ) + if section.key in PARAM_KEYWORDS: + raise ParseError("Expected parameter name.") + return DocstringMeta(args=[section.key], description=desc) + + @staticmethod + def _build_multi_meta( + section: Section, before: str, desc: str + ) -> DocstringMeta: + if section.key in PARAM_KEYWORDS: + match = GOOGLE_TYPED_ARG_REGEX.match(before) + if match: + arg_name, type_name = match.group(1, 2) + if type_name.endswith(", optional"): + is_optional = True + type_name = type_name[:-10] + elif type_name.endswith("?"): + is_optional = True +
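# a trailing "?" on the type name is shorthand for an optional parameter +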
type_name = type_name[:-1] + else: + is_optional = False + else: + arg_name, type_name = before, None + is_optional = None + + match = GOOGLE_ARG_DESC_REGEX.match(desc) + default = match.group(1) if match else None + + return DocstringParam( + args=[section.key, before], + description=desc, + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + if section.key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + return DocstringReturns( + args=[section.key, before], + description=desc, + type_name=before, + is_generator=section.key in YIELDS_KEYWORDS, + ) + if section.key in RAISES_KEYWORDS: + return DocstringRaises( + args=[section.key, before], description=desc, type_name=before + ) + return DocstringMeta(args=[section.key, before], description=desc) + + def add_section(self, section: Section): + """Add or replace a section. + + :param section: The new section. + """ + + self.sections[section.title] = section + self._setup() + + def parse(self, text: str) -> Docstring: + """Parse the Google-style docstring into its components. + + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.GOOGLE) + if not text: + return ret + + # Clean according to PEP-0257 + text = inspect.cleandoc(text) + + # Find first title and split on its position + match = self.titles_re.search(text) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + # Break description into short and long parts + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith( + "\n" + ) + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + # Split by sections determined by titles + matches = list(self.titles_re.finditer(meta_chunk)) + if not matches: + return ret + splits = [] + for j in range(len(matches) - 1): + splits.append((matches[j].end(), matches[j + 1].start())) + splits.append((matches[-1].end(), len(meta_chunk))) + + chunks = OrderedDict() # type: T.Mapping[str,str] + for j, (start, end) in enumerate(splits): + title = matches[j].group(1) + if title not in self.sections: + continue + + # Clear Any Unknown Meta + # Ref: https://github.com/rr-/docstring_parser/issues/29 + meta_details = meta_chunk[start:end] + unknown_meta = re.search(r"\n\S", meta_details) + if unknown_meta is not None: + meta_details = meta_details[: unknown_meta.start()] + + chunks[title] = meta_details.strip("\n") + if not chunks: + return ret + + # Add elements from each chunk + for title, chunk in chunks.items(): + # Determine indent + indent_match = re.search(r"^\s*", chunk) + if not indent_match: + raise ParseError(f'Can\'t infer indent from "{chunk}"') + indent = indent_match.group() + + # Check for singular elements + if self.sections[title].type in [ + SectionType.SINGULAR, + SectionType.SINGULAR_OR_MULTIPLE, + ]: + part = inspect.cleandoc(chunk) + ret.meta.append(self._build_meta(part, title)) + continue + + # Split based on lines which have exactly that indent + _re = "^" + indent + r"(?=\S)" + c_matches = list(re.finditer(_re, chunk, flags=re.M)) + if not c_matches: + raise ParseError(f'No specification for "{title}": "{chunk}"') + c_splits = [] + for j in range(len(c_matches) - 1): + c_splits.append((c_matches[j].end(), c_matches[j + 1].start())) + c_splits.append((c_matches[-1].end(), len(chunk))) + for j, 
(start, end) in enumerate(c_splits): + part = chunk[start:end].strip("\n") + ret.meta.append(self._build_meta(part, title)) + + return ret + + +def parse(text: str) -> Docstring: + """Parse the Google-style docstring into its components. + + :returns: parsed docstring + """ + return GoogleParser().parse(text) + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. + + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_one( + one: T.Union[DocstringParam, DocstringReturns, DocstringRaises], + ): + head = "" + + if isinstance(one, DocstringParam): + head += one.arg_name or "" + elif isinstance(one, DocstringReturns): + head += one.return_name or "" + + if isinstance(one, DocstringParam) and one.is_optional: + optional = ( + "?" + if rendering_style == RenderingStyle.COMPACT + else ", optional" + ) + else: + optional = "" + + if one.type_name and head: + head += f" ({one.type_name}{optional}):" + elif one.type_name: + head += f"{one.type_name}{optional}:" + else: + head += ":" + head = indent + head + + if one.description and rendering_style == RenderingStyle.EXPANDED: + body = f"\n{indent}{indent}".join( + [head] + one.description.splitlines() + ) + parts.append(body) + elif one.description: + (first, *rest) = one.description.splitlines() + body = f"\n{indent}{indent}".join([head + " " + first] + rest) + parts.append(body) + else: + parts.append(head) + + def process_sect(name: str, args: T.List[T.Any]): + if args: + parts.append(name) + for arg in args: + process_one(arg) + parts.append("") + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + process_sect( + "Args:", [p for p in docstring.params or [] if p.args[0] == "param"] + ) + + process_sect( + "Attributes:", + [p for p in docstring.params or [] if p.args[0] == "attribute"], + ) + + process_sect( + "Returns:", + [p for p in docstring.many_returns or [] if not p.is_generator], + ) + + process_sect( + "Yields:", [p for p in docstring.many_returns or [] if p.is_generator] + ) + + process_sect("Raises:", docstring.raises or []) + + if docstring.returns and not docstring.many_returns: + ret = docstring.returns + parts.append("Yields:" if ret.is_generator else "Returns:") + parts.append("-" * len(parts[-1])) + process_one(ret) + + for meta in docstring.meta: + if isinstance( + meta, (DocstringParam, DocstringReturns, DocstringRaises) + ): + continue # Already handled + parts.append(meta.args[0].replace("_", "").title() + ":") + if meta.description: + lines = [indent + l for l in meta.description.splitlines()] + parts.append("\n".join(lines)) + parts.append("") + + while parts and not parts[-1]: + parts.pop() + + return "\n".join(parts) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/numpydoc.py b/py311/lib/python3.11/site-packages/docstring_parser/numpydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..836e7766d8faf25e72ec264c4397227fab8d1ffc --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/numpydoc.py @@ -0,0 +1,532 @@
+"""Numpydoc-style docstring parsing. + +:see: https://numpydoc.readthedocs.io/en/latest/format.html +""" + +import inspect +import itertools +import re +import typing as T +from textwrap import dedent + +from .common import ( + Docstring, + DocstringDeprecated, + DocstringExample, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + RenderingStyle, +) + + +def _pairwise(iterable: T.Iterable, end=None) -> T.Iterable: + left, right = itertools.tee(iterable) + next(right, None) + return itertools.zip_longest(left, right, fillvalue=end) + + +def _clean_str(string: str) -> T.Optional[str]: + string = string.strip() + if len(string) > 0: + return string + return None + + +KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M) +PARAM_KEY_REGEX = re.compile(r"^(?P.*?)(?:\s*:\s*(?P.*?))?$") +PARAM_OPTIONAL_REGEX = re.compile(r"(?P.*?)(?:, optional|\(optional\))$") + +# numpydoc format has no formal grammar for this, +# but we can make some educated guesses... +PARAM_DEFAULT_REGEX = re.compile( + r"(?[\w\-\.]*\w)" +) + +RETURN_KEY_REGEX = re.compile(r"^(?:(?P.*?)\s*:\s*)?(?P.*?)$") + + +class Section: + """Numpydoc section parser. + + :param title: section title. For most sections, this is a heading like + "Parameters" which appears on its own line, underlined by + en-dashes ('-') on the following line. + :param key: meta key string. In the parsed ``DocstringMeta`` instance this + will be the first element of the ``args`` attribute list. + """ + + def __init__(self, title: str, key: str) -> None: + self.title = title + self.key = key + + @property + def title_pattern(self) -> str: + """Regular expression pattern matching this section's header. + + This pattern will match this instance's ``title`` attribute in + an anonymous group. + """ + dashes = "-" * len(self.title) + return rf"^({self.title})\s*?\n{dashes}\s*$" + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + """Parse ``DocstringMeta`` objects from the body of this section. + + :param text: section body text. Should be cleaned with + ``inspect.cleandoc`` before parsing. + """ + yield DocstringMeta([self.key], description=_clean_str(text)) + + +class _KVSection(Section): + """Base parser for numpydoc sections with key-value syntax. + + E.g. sections that look like this: + key + value + key2 : type + values can also span... + ... multiple lines + """ + + def _parse_item(self, key: str, value: str) -> DocstringMeta: + pass + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + for match, next_match in _pairwise(KV_REGEX.finditer(text)): + start = match.end() + end = next_match.start() if next_match is not None else None + value = text[start:end] + yield self._parse_item( + key=match.group(), value=inspect.cleandoc(value) + ) + + +class _SphinxSection(Section): + """Base parser for numpydoc sections with sphinx-style syntax. + + E.g. sections that look like this: + .. title:: something + possibly over multiple lines + """ + + @property + def title_pattern(self) -> str: + return rf"^\.\.\s*({self.title})\s*::" + + +class ParamSection(_KVSection): + """Parser for numpydoc parameter sections. + + E.g. any section that looks like this: + arg_name + arg_description + arg_2 : type, optional + descriptions can also span... + ... 
multiple lines + """ + + def _parse_item(self, key: str, value: str) -> DocstringParam: + match = PARAM_KEY_REGEX.match(key) + arg_name = type_name = is_optional = None + if match is not None: + arg_name = match.group("name") + type_name = match.group("type") + if type_name is not None: + optional_match = PARAM_OPTIONAL_REGEX.match(type_name) + if optional_match is not None: + type_name = optional_match.group("type") + is_optional = True + else: + is_optional = False + + default = None + if len(value) > 0: + default_match = PARAM_DEFAULT_REGEX.search(value) + if default_match is not None: + default = default_match.group("value") + + return DocstringParam( + args=[self.key, arg_name], + description=_clean_str(value), + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + + +class RaisesSection(_KVSection): + """Parser for numpydoc raises sections. + + E.g. any section that looks like this: + ValueError + A description of what might raise ValueError + """ + + def _parse_item(self, key: str, value: str) -> DocstringRaises: + return DocstringRaises( + args=[self.key, key], + description=_clean_str(value), + type_name=key if len(key) > 0 else None, + ) + + +class ReturnsSection(_KVSection): + """Parser for numpydoc returns sections. + + E.g. any section that looks like this: + return_name : type + A description of this returned value + another_type + Return names are optional, types are required + """ + + is_generator = False + + def _parse_item(self, key: str, value: str) -> DocstringReturns: + match = RETURN_KEY_REGEX.match(key) + if match is not None: + return_name = match.group("name") + type_name = match.group("type") + else: + return_name = None + type_name = None + + return DocstringReturns( + args=[self.key], + description=_clean_str(value), + type_name=type_name, + is_generator=self.is_generator, + return_name=return_name, + ) + + +class YieldsSection(ReturnsSection): + """Parser for numpydoc generator "yields" sections.""" + + is_generator = True + + +class DeprecationSection(_SphinxSection): + """Parser for numpydoc "deprecation warning" sections.""" + + def parse(self, text: str) -> T.Iterable[DocstringDeprecated]: + version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None] + + if desc is not None: + desc = _clean_str(inspect.cleandoc(desc)) + + yield DocstringDeprecated( + args=[self.key], description=desc, version=_clean_str(version) + ) + + +class ExamplesSection(Section): + """Parser for numpydoc examples sections. + + E.g. any section that looks like this: + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + """ + + def parse(self, text: str) -> T.Iterable[DocstringMeta]: + """Parse ``DocstringExample`` objects from the body of this section. + + :param text: section body text. Should be cleaned with + ``inspect.cleandoc`` before parsing. 
+ """ + lines = dedent(text).strip().splitlines() + while lines: + snippet_lines = [] + description_lines = [] + while lines: + if not lines[0].startswith(">>>"): + break + snippet_lines.append(lines.pop(0)) + while lines: + if lines[0].startswith(">>>"): + break + description_lines.append(lines.pop(0)) + yield DocstringExample( + [self.key], + snippet="\n".join(snippet_lines) if snippet_lines else None, + description="\n".join(description_lines), + ) + + +DEFAULT_SECTIONS = [ + ParamSection("Parameters", "param"), + ParamSection("Params", "param"), + ParamSection("Arguments", "param"), + ParamSection("Args", "param"), + ParamSection("Other Parameters", "other_param"), + ParamSection("Other Params", "other_param"), + ParamSection("Other Arguments", "other_param"), + ParamSection("Other Args", "other_param"), + ParamSection("Receives", "receives"), + ParamSection("Receive", "receives"), + RaisesSection("Raises", "raises"), + RaisesSection("Raise", "raises"), + RaisesSection("Warns", "warns"), + RaisesSection("Warn", "warns"), + ParamSection("Attributes", "attribute"), + ParamSection("Attribute", "attribute"), + ReturnsSection("Returns", "returns"), + ReturnsSection("Return", "returns"), + YieldsSection("Yields", "yields"), + YieldsSection("Yield", "yields"), + ExamplesSection("Examples", "examples"), + ExamplesSection("Example", "examples"), + Section("Warnings", "warnings"), + Section("Warning", "warnings"), + Section("See Also", "see_also"), + Section("Related", "see_also"), + Section("Notes", "notes"), + Section("Note", "notes"), + Section("References", "references"), + Section("Reference", "references"), + DeprecationSection("deprecated", "deprecation"), +] + + +class NumpydocParser: + """Parser for numpydoc-style docstrings.""" + + def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None): + """Setup sections. + + :param sections: Recognized sections or None to defaults. + """ + sections = sections or DEFAULT_SECTIONS + self.sections = {s.title: s for s in sections} + self._setup() + + def _setup(self): + self.titles_re = re.compile( + r"|".join(s.title_pattern for s in self.sections.values()), + flags=re.M, + ) + + def add_section(self, section: Section): + """Add or replace a section. + + :param section: The new section. + """ + + self.sections[section.title] = section + self._setup() + + def parse(self, text: str) -> Docstring: + """Parse the numpy-style docstring into its components. 
+ + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.NUMPYDOC) + if not text: + return ret + + # Clean according to PEP-0257 + text = inspect.cleandoc(text) + + # Find first title and split on its position + match = self.titles_re.search(text) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + # Break description into short and long parts + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith( + "\n" + ) + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)): + title = next(g for g in match.groups() if g is not None) + factory = self.sections[title] + + # section chunk starts after the header, + # ends at the start of the next header + start = match.end() + end = nextmatch.start() if nextmatch is not None else None + ret.meta.extend(factory.parse(meta_chunk[start:end])) + + return ret + + +def parse(text: str) -> Docstring: + """Parse the numpy-style docstring into its components. + + :returns: parsed docstring + """ + return NumpydocParser().parse(text) + + +def compose( + # pylint: disable=W0613 + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. + + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_one( + one: T.Union[DocstringParam, DocstringReturns, DocstringRaises], + ): + if isinstance(one, DocstringParam): + head = one.arg_name + elif isinstance(one, DocstringReturns): + head = one.return_name + else: + head = None + + if one.type_name and head: + head += f" : {one.type_name}" + elif one.type_name: + head = one.type_name + elif not head: + head = "" + + if isinstance(one, DocstringParam) and one.is_optional: + head += ", optional" + + if one.description: + body = f"\n{indent}".join([head] + one.description.splitlines()) + parts.append(body) + else: + parts.append(head) + + def process_sect(name: str, args: T.List[T.Any]): + if args: + parts.append("") + parts.append(name) + parts.append("-" * len(parts[-1])) + for arg in args: + process_one(arg) + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + + if docstring.deprecation: + first = ".. 
deprecated::" + if docstring.deprecation.version: + first += f" {docstring.deprecation.version}" + if docstring.deprecation.description: + rest = docstring.deprecation.description.splitlines() + else: + rest = [] + sep = f"\n{indent}" + parts.append(sep.join([first] + rest)) + + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + process_sect( + "Parameters", + [item for item in docstring.params or [] if item.args[0] == "param"], + ) + + process_sect( + "Attributes", + [ + item + for item in docstring.params or [] + if item.args[0] == "attribute" + ], + ) + + process_sect( + "Returns", + [ + item + for item in docstring.many_returns or [] + if not item.is_generator + ], + ) + + process_sect( + "Yields", + [item for item in docstring.many_returns or [] if item.is_generator], + ) + + if docstring.returns and not docstring.many_returns: + ret = docstring.returns + parts.append("Yields" if ret else "Returns") + parts.append("-" * len(parts[-1])) + process_one(ret) + + process_sect( + "Receives", + [ + item + for item in docstring.params or [] + if item.args[0] == "receives" + ], + ) + + process_sect( + "Other Parameters", + [ + item + for item in docstring.params or [] + if item.args[0] == "other_param" + ], + ) + + process_sect( + "Raises", + [item for item in docstring.raises or [] if item.args[0] == "raises"], + ) + + process_sect( + "Warns", + [item for item in docstring.raises or [] if item.args[0] == "warns"], + ) + + for meta in docstring.meta: + if isinstance( + meta, + ( + DocstringDeprecated, + DocstringParam, + DocstringReturns, + DocstringRaises, + ), + ): + continue # Already handled + + parts.append("") + parts.append(meta.args[0].replace("_", "").title()) + parts.append("-" * len(meta.args[0])) + + if meta.description: + parts.append(meta.description) + + return "\n".join(parts) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/parser.py b/py311/lib/python3.11/site-packages/docstring_parser/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..2710e6330da4180d2e20890692f79812fb771024 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/parser.py @@ -0,0 +1,98 @@ +"""The main parsing routine.""" + +import inspect +import typing as T + +from docstring_parser import epydoc, google, numpydoc, rest +from docstring_parser.attrdoc import add_attribute_docstrings +from docstring_parser.common import ( + Docstring, + DocstringStyle, + ParseError, + RenderingStyle, +) + +_STYLE_MAP = { + DocstringStyle.REST: rest, + DocstringStyle.GOOGLE: google, + DocstringStyle.NUMPYDOC: numpydoc, + DocstringStyle.EPYDOC: epydoc, +} + + +def parse(text: str, style: DocstringStyle = DocstringStyle.AUTO) -> Docstring: + """Parse the docstring into its components. + + :param text: docstring text to parse + :param style: docstring style + :returns: parsed docstring representation + """ + if style != DocstringStyle.AUTO: + return _STYLE_MAP[style].parse(text) + + exc: T.Optional[Exception] = None + rets = [] + for module in _STYLE_MAP.values(): + try: + ret = module.parse(text) + except ParseError as ex: + exc = ex + else: + rets.append(ret) + + if not rets: + raise exc + + return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0] + + +def parse_from_object( + obj: T.Any, + style: DocstringStyle = DocstringStyle.AUTO, +) -> Docstring: + """Parse the object's docstring(s) into its components. + + The object can be anything that has a ``__doc__`` attribute. 
In contrast to + the ``parse`` function, ``parse_from_object`` is able to parse attribute + docstrings which are defined in the source code instead of ``__doc__``. + + Currently only attribute docstrings defined at class and module levels are + supported. Attribute docstrings defined in ``__init__`` methods are not + supported. + + When given a class, only the attribute docstrings of that class are parsed, + not its inherited classes. This is a design decision. Separate calls to + this function should be performed to get attribute docstrings of parent + classes. + + :param obj: object from which to parse the docstring(s) + :param style: docstring style + :returns: parsed docstring representation + """ + docstring = parse(obj.__doc__, style=style) + + if inspect.isclass(obj) or inspect.ismodule(obj): + add_attribute_docstrings(obj, docstring) + + return docstring + + +def compose( + docstring: Docstring, + style: DocstringStyle = DocstringStyle.AUTO, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text. + + :param docstring: parsed docstring representation + :param style: docstring style to render + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + module = _STYLE_MAP[ + docstring.style if style == DocstringStyle.AUTO else style + ] + return module.compose( + docstring, rendering_style=rendering_style, indent=indent + ) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/py.typed b/py311/lib/python3.11/site-packages/docstring_parser/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..1242d43277017748bbbf3d0a104ec4cf78245822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. diff --git a/py311/lib/python3.11/site-packages/docstring_parser/rest.py b/py311/lib/python3.11/site-packages/docstring_parser/rest.py new file mode 100644 index 0000000000000000000000000000000000000000..b707df87895c0517aa395d586c512ea35a75281f --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/rest.py @@ -0,0 +1,259 @@ +"""ReST-style docstring parsing.""" + +import inspect +import re +import typing as T + +from .common import ( + DEPRECATION_KEYWORDS, + PARAM_KEYWORDS, + RAISES_KEYWORDS, + RETURNS_KEYWORDS, + YIELDS_KEYWORDS, + Docstring, + DocstringDeprecated, + DocstringMeta, + DocstringParam, + DocstringRaises, + DocstringReturns, + DocstringStyle, + ParseError, + RenderingStyle, +) + + +def _build_meta(args: T.List[str], desc: str) -> DocstringMeta: + key = args[0] + + if key in PARAM_KEYWORDS: + if len(args) == 3: + key, type_name, arg_name = args + if type_name.endswith("?"): + is_optional = True + type_name = type_name[:-1] + else: + is_optional = False + elif len(args) == 2: + key, arg_name = args + type_name = None + is_optional = None + else: + raise ParseError( + f"Expected one or two arguments for a {key} keyword." + ) + + match = re.match(r".*defaults to (.+)", desc, flags=re.DOTALL) + default = match.group(1).rstrip(".") if match else None + + return DocstringParam( + args=args, + description=desc, + arg_name=arg_name, + type_name=type_name, + is_optional=is_optional, + default=default, + ) + + if key in RETURNS_KEYWORDS | YIELDS_KEYWORDS: + if len(args) == 2: + type_name = args[1] + elif len(args) == 1: + type_name = None + else: + raise ParseError( + f"Expected one or no arguments for a {key} keyword." 
+ ) + + return DocstringReturns( + args=args, + description=desc, + type_name=type_name, + is_generator=key in YIELDS_KEYWORDS, + ) + + if key in DEPRECATION_KEYWORDS: + match = re.search( + r"^(?P<version>v?((?:\d+)(?:\.[0-9a-z\.]+))) (?P<desc>.+)", + desc, + flags=re.I, + ) + return DocstringDeprecated( + args=args, + version=match.group("version") if match else None, + description=match.group("desc") if match else desc, + ) + + if key in RAISES_KEYWORDS: + if len(args) == 2: + type_name = args[1] + elif len(args) == 1: + type_name = None + else: + raise ParseError( + f"Expected one or no arguments for a {key} keyword." + ) + return DocstringRaises( + args=args, description=desc, type_name=type_name + ) + + return DocstringMeta(args=args, description=desc) + + +def parse(text: str) -> Docstring: + """Parse the ReST-style docstring into its components. + + :returns: parsed docstring + """ + ret = Docstring(style=DocstringStyle.REST) + if not text: + return ret + + text = inspect.cleandoc(text) + match = re.search("^:", text, flags=re.M) + if match: + desc_chunk = text[: match.start()] + meta_chunk = text[match.start() :] + else: + desc_chunk = text + meta_chunk = "" + + parts = desc_chunk.split("\n", 1) + ret.short_description = parts[0] or None + if len(parts) > 1: + long_desc_chunk = parts[1] or "" + ret.blank_after_short_description = long_desc_chunk.startswith("\n") + ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") + ret.long_description = long_desc_chunk.strip() or None + + types = {} + rtypes = {} + for match in re.finditer( + r"(^:.*?)(?=^:|\Z)", meta_chunk, flags=re.S | re.M + ): + chunk = match.group(0) + if not chunk: + continue + try: + args_chunk, desc_chunk = chunk.lstrip(":").split(":", 1) + except ValueError as ex: + raise ParseError( + f'Error parsing meta information near "{chunk}".' + ) from ex + args = args_chunk.split() + desc = desc_chunk.strip() + + if "\n" in desc: + first_line, rest = desc.split("\n", 1) + desc = first_line + "\n" + inspect.cleandoc(rest) + + # Add special handling for :type a: typename + if len(args) == 2 and args[0] == "type": + types[args[1]] = desc + elif len(args) in [1, 2] and args[0] == "rtype": + rtypes[None if len(args) == 1 else args[1]] = desc + else: + ret.meta.append(_build_meta(args, desc)) + + for meta in ret.meta: + if isinstance(meta, DocstringParam): + meta.type_name = meta.type_name or types.get(meta.arg_name) + elif isinstance(meta, DocstringReturns): + meta.type_name = meta.type_name or rtypes.get(meta.return_name) + + if not any(isinstance(m, DocstringReturns) for m in ret.meta) and rtypes: + for return_name, type_name in rtypes.items(): + ret.meta.append( + DocstringReturns( + args=[], + type_name=type_name, + description=None, + is_generator=False, + return_name=return_name, + ) + ) + + return ret + + +def compose( + docstring: Docstring, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, + indent: str = " ", +) -> str: + """Render a parsed docstring into docstring text.
+ + :param docstring: parsed docstring representation + :param rendering_style: the style to render docstrings + :param indent: the characters used as indentation in the docstring string + :returns: docstring text + """ + + def process_desc(desc: T.Optional[str]) -> str: + if not desc: + return "" + + if rendering_style == RenderingStyle.CLEAN: + (first, *rest) = desc.splitlines() + return "\n".join([" " + first] + [indent + line for line in rest]) + + if rendering_style == RenderingStyle.EXPANDED: + (first, *rest) = desc.splitlines() + return "\n".join( + ["\n" + indent + first] + [indent + line for line in rest] + ) + + return " " + desc + + parts: T.List[str] = [] + if docstring.short_description: + parts.append(docstring.short_description) + if docstring.blank_after_short_description: + parts.append("") + if docstring.long_description: + parts.append(docstring.long_description) + if docstring.blank_after_long_description: + parts.append("") + + for meta in docstring.meta: + if isinstance(meta, DocstringParam): + if meta.type_name: + type_text = ( + f" {meta.type_name}? " + if meta.is_optional + else f" {meta.type_name} " + ) + else: + type_text = " " + if rendering_style == RenderingStyle.EXPANDED: + text = f":param {meta.arg_name}:" + text += process_desc(meta.description) + parts.append(text) + if type_text[:-1]: + parts.append(f":type {meta.arg_name}:{type_text[:-1]}") + else: + text = f":param{type_text}{meta.arg_name}:" + text += process_desc(meta.description) + parts.append(text) + elif isinstance(meta, DocstringReturns): + type_text = f" {meta.type_name}" if meta.type_name else "" + key = "yields" if meta.is_generator else "returns" + + if rendering_style == RenderingStyle.EXPANDED: + if meta.description: + text = f":{key}:" + text += process_desc(meta.description) + parts.append(text) + if type_text: + parts.append(f":rtype:{type_text}") + else: + text = f":{key}{type_text}:" + text += process_desc(meta.description) + parts.append(text) + elif isinstance(meta, DocstringRaises): + type_text = f" {meta.type_name} " if meta.type_name else "" + text = f":raises{type_text}:" + process_desc(meta.description) + parts.append(text) + else: + text = f':{" ".join(meta.args)}:' + process_desc(meta.description) + parts.append(text) + return "\n".join(parts) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/__init__.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..164665e94d8456d16cb55bbf24dd23cb0978b9c2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for docstring parser.""" diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/_pydoctor.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/_pydoctor.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3532a47526478e8511301871f79fadd779b3b9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/_pydoctor.py @@ -0,0 +1,22 @@ +"""Private pydoctor customization code in order to exclude the package +docstring_parser.tests from the API documentation. Based on Twisted code. 
+""" + +# pylint: disable=invalid-name + +try: + from pydoctor.model import Documentable, PrivacyClass, System +except ImportError: + pass +else: + + class HidesTestsPydoctorSystem(System): + """A PyDoctor "system" used to generate the docs.""" + + def privacyClass(self, documentable: Documentable) -> PrivacyClass: + """Report the privacy level for an object. Hide the module + 'docstring_parser.tests'. + """ + if documentable.fullName().startswith("docstring_parser.tests"): + return PrivacyClass.HIDDEN + return super().privacyClass(documentable) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_epydoc.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_epydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..965b440bfc0a9b2b46c7c6acff697424d1224376 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_epydoc.py @@ -0,0 +1,724 @@ +"""Tests for epydoc-style docstring routines.""" + +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.epydoc import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + @meta: asd + """, + "Short description", + None, + False, + False, + ), + ( + """ + Short description + Long description + @meta: asd + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + @meta: asd + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short 
description + + First line + Second line + @meta: asd + """, + "Short description", + "First line\n Second line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + @meta: asd + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + @meta: asd + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + @meta: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + @meta1: asd + 1 + 2 + 3 + @meta2: herp + @meta3: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["meta1"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["meta2"] + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["meta3"] + assert docstring.meta[2].description == "derp" + + +def test_meta_with_args() -> None: + """Test parsing meta with additional arguments.""" + docstring = parse( + """ + Short description + + @meta ene due rabe: asd + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta", "ene", "due", "rabe"] + assert docstring.meta[0].description == "asd" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @param message: description 4, defaults to 'hello' + @type message: str? + @param multiline: long description 5, + defaults to 'bye' + @type multiline: str? 
+ """ + ) + assert len(docstring.params) == 5 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[1].default is None + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[2].default is None + assert docstring.params[3].arg_name == "message" + assert docstring.params[3].type_name == "str" + assert ( + docstring.params[3].description == "description 4, defaults to 'hello'" + ) + assert docstring.params[3].is_optional + assert docstring.params[3].default == "'hello'" + assert docstring.params[4].arg_name == "multiline" + assert docstring.params[4].type_name == "str" + assert ( + docstring.params[4].description + == "long description 5,\ndefaults to 'bye'" + ) + assert docstring.params[4].is_optional + assert docstring.params[4].default == "'bye'" + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + + docstring = parse( + """ + Short description + @return: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + + docstring = parse( + """ + Short description + @return: description + @rtype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + + docstring = parse( + """ + Short description + @yield: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + + docstring = parse( + """ + Short description + @yield: description + @ytype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + @raise: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name is None + assert docstring.raises[0].description == "description" + + docstring = parse( + """ + Short description + @raise ValueError: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse("@") + + with pytest.raises(ParseError): + parse("@param herp derp") + + with 
pytest.raises(ParseError): + parse("@param: invalid") + + with pytest.raises(ParseError): + parse("@param with too many args: desc") + + # these should not raise any errors + parse("@sthstrange: desc") + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + @meta: asd + """, + "Short description\n@meta: asd", + ), + ( + """ + Short description + Long description + @meta: asd + """, + "Short description\nLong description\n@meta: asd", + ), + ( + """ + Short description + First line + Second line + @meta: asd + """, + "Short description\nFirst line\n Second line\n@meta: asd", + ), + ( + """ + Short description + + First line + Second line + @meta: asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "@meta: asd", + ), + ( + """ + Short description + + First line + Second line + + @meta: asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "@meta: asd", + ), + ( + """ + @meta: asd + """, + "@meta: asd", + ), + ( + """ + Short description + + @meta: asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "@meta: asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + Short description + + @meta1: asd + 1 + 2 + 3 + @meta2: herp + @meta3: derp + """, + "Short description\n" + "\n@meta1: asd\n" + " 1\n" + " 2\n" + " 3\n@meta2: herp\n" + "@meta3: derp", + ), + ( + """ + Short description + + @meta ene due rabe: asd + """, + "Short description\n\n@meta ene due rabe: asd", + ), + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? 
+ @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name: description 1\n" + "@type priority: int\n" + "@param priority: description 2\n" + "@type sender: str?\n" + "@param sender: description 3\n" + "@type message: str?\n" + "@param message: description 4, defaults to 'hello'\n" + "@type multiline: str?\n" + "@param multiline: long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + @raise: description + """, + "Short description\n@raise: description", + ), + ( + """ + Short description + @raise ValueError: description + """, + "Short description\n@raise ValueError: description", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert compose(parse(source)) == expected + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? + @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name:\n" + " description 1\n" + "@type priority: int\n" + "@param priority:\n" + " description 2\n" + "@type sender: str?\n" + "@param sender:\n" + " description 3\n" + "@type message: str?\n" + "@param message:\n" + " description 4, defaults to 'hello'\n" + "@type multiline: str?\n" + "@param multiline:\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_clean(source: str, expected: str) -> None: + """Test compose in clean mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.CLEAN) + == expected + ) + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + @param name: description 1 + @param priority: description 2 + @type priority: int + @param sender: description 3 + @type sender: str? + @type message: str? + @param message: description 4, defaults to 'hello' + @type multiline: str? 
+ @param multiline: long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "@param name:\n" + " description 1\n" + "@type priority:\n" + " int\n" + "@param priority:\n" + " description 2\n" + "@type sender:\n" + " str?\n" + "@param sender:\n" + " description 3\n" + "@type message:\n" + " str?\n" + "@param message:\n" + " description 4, defaults to 'hello'\n" + "@type multiline:\n" + " str?\n" + "@param multiline:\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_expanded(source: str, expected: str) -> None: + """Test compose in expanded mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.EXPANDED) + == expected + ) + + +def test_short_rtype() -> None: + """Test abbreviated docstring with only return type information.""" + string = "Short description.\n\n@rtype: float" + docstring = parse(string) + assert compose(docstring) == string diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_google.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_google.py new file mode 100644 index 0000000000000000000000000000000000000000..b32c04e470e04181479ae50cd8b2d10f0457bf5a --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_google.py @@ -0,0 +1,1000 @@ +"""Tests for Google-style docstring routines.""" + +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.google import ( + GoogleParser, + Section, + SectionType, + compose, + parse, +) + + +def test_google_parser_unknown_section() -> None: + """Test parsing an unknown section with default GoogleParser + configuration. + """ + parser = GoogleParser() + docstring = parser.parse( + """ + Unknown: + spam: a + """ + ) + assert docstring.short_description == "Unknown:" + assert docstring.long_description == "spam: a" + assert len(docstring.meta) == 0 + + +def test_google_parser_multi_line_parameter_type() -> None: + """Test parsing a multi-line parameter type with default GoogleParser""" + parser = GoogleParser() + docstring = parser.parse( + """Description of the function. + + Args: + output_type (Literal["searchResults", "sourcedAnswer", + "structured"]): The type of output. + This can be one of the following: + - "searchResults": Represents the search results. + - "sourcedAnswer": Represents a sourced answer. + - "structured": Represents a structured output format. + + Returns: + bool: Indicates success or failure. + + """ + ) + assert docstring.params[0].arg_name == "output_type" + + +def test_google_parser_custom_sections() -> None: + """Test parsing an unknown section with custom GoogleParser + configuration. + """ + parser = GoogleParser( + [ + Section("DESCRIPTION", "desc", SectionType.SINGULAR), + Section("ARGUMENTS", "param", SectionType.MULTIPLE), + Section("ATTRIBUTES", "attribute", SectionType.MULTIPLE), + Section("EXAMPLES", "examples", SectionType.SINGULAR), + ], + title_colon=False, + ) + docstring = parser.parse( + """ + DESCRIPTION + This is the description. + + ARGUMENTS + arg1: first arg + arg2: second arg + + ATTRIBUTES + attr1: first attribute + attr2: second attribute + + EXAMPLES + Many examples + More examples + """ + ) + + assert docstring.short_description is None + assert docstring.long_description is None + assert len(docstring.meta) == 6 + assert docstring.meta[0].args == ["desc"] + assert docstring.meta[0].description == "This is the description." 
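+ # each entry of a MULTIPLE section becomes its own meta item: [key, arg_name]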
+ assert docstring.meta[1].args == ["param", "arg1"] + assert docstring.meta[1].description == "first arg" + assert docstring.meta[2].args == ["param", "arg2"] + assert docstring.meta[2].description == "second arg" + assert docstring.meta[3].args == ["attribute", "attr1"] + assert docstring.meta[3].description == "first attribute" + assert docstring.meta[4].args == ["attribute", "attr2"] + assert docstring.meta[4].description == "second attribute" + assert docstring.meta[5].args == ["examples"] + assert docstring.meta[5].description == "Many examples\nMore examples" + + +def test_google_parser_custom_sections_after() -> None: + """Test parsing an unknown section with custom GoogleParser configuration + that was set at runtime. + """ + parser = GoogleParser(title_colon=False) + parser.add_section(Section("Note", "note", SectionType.SINGULAR)) + docstring = parser.parse( + """ + short description + + Note: + a note + """ + ) + assert docstring.short_description == "short description" + assert docstring.long_description == "Note:\n a note" + + docstring = parser.parse( + """ + short description + + Note a note + """ + ) + assert docstring.short_description == "short description" + assert docstring.long_description == "Note a note" + + docstring = parser.parse( + """ + short description + + Note + a note + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["note"] + assert docstring.meta[0].description == "a note" + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + Args: + asd: + """, + "Short description", + None, +
False, + False, + ), + ( + """ + Short description + Long description + Args: + asd: + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + Args: + asd: + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short description + + First line + Second line + Args: + asd: + """, + "Short description", + "First line\n Second line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + Args: + asd: + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + Args: + asd: + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + Args: + spam: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_default_args() -> None: + """Test parsing default arguments.""" + docstring = parse( + """A sample function + +A function that demonstrates docstrings + +Args: + arg1 (int): The first arg + arg2 (str): The second arg + arg3 (float, optional): The third arg. Defaults to 1.0. + arg4 (Optional[Dict[str, Any]], optional): The last arg. Defaults to None. + arg5 (str, optional): The fifth arg. Defaults to DEFAULT_ARG5. + +Returns: + Mapping[str, Any]: The args packed in a mapping +""" + ) + assert docstring is not None + assert len(docstring.params) == 5 + + arg4 = docstring.params[3] + assert arg4.arg_name == "arg4" + assert arg4.is_optional + assert arg4.type_name == "Optional[Dict[str, Any]]" + assert arg4.default == "None" + assert arg4.description == "The last arg. Defaults to None."
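+
+
+# A minimal illustrative sketch added for this write-up (not part of the
+# upstream test suite): the Google-style parser recovers `default` from a
+# trailing "Defaults to ..." clause, mirroring the `arg4` assertions above.
+# The helper name `_sketch_default_extraction` is hypothetical; it is
+# deliberately not prefixed with `test_` so pytest does not collect it.
+def _sketch_default_extraction() -> None:
+    doc = parse(
+        "A sample function\n"
+        "\n"
+        "Args:\n"
+        "    arg (Optional[int], optional): An arg. Defaults to None.\n"
+    )
+    assert doc.params[0].is_optional
+    assert doc.params[0].type_name == "Optional[int]"
+    assert doc.params[0].default == "None"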
+ + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + Args: + spam: asd + 1 + 2 + 3 + + Raises: + bla: herp + yay: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["raises", "bla"] + assert docstring.meta[1].type_name == "bla" + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["raises", "yay"] + assert docstring.meta[2].type_name == "yay" + assert docstring.meta[2].description == "derp" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str?): description 3 + ratio (Optional[float], optional): description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Args: + name: description 1 + with multi-line text + priority (int): description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_attributes() -> None: + """Test parsing attributes.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Attributes: + name: description 1 + priority (int): description 2 + sender (str?): description 3 + ratio (Optional[float], optional): description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name 
== "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Attributes: + name: description 1 + with multi-line text + priority (int): description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + Returns: + description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + description with: a colon! + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description with: a colon!" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Returns: + Optional[Mapping[str, List[int]]]: A description: with a colon + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]" + assert docstring.returns.description == "A description: with a colon" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Yields: + int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns: + int: description + with much text + + even some spacing + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ( + "description\nwith much text\n\neven some spacing" + ) + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + Raises: + ValueError: description + """ + ) + 
assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_examples() -> None: + """Test parsing examples.""" + docstring = parse( + """ + Short description + Example: + example: 1 + Examples: + long example + + more here + """ + ) + assert len(docstring.examples) == 2 + assert docstring.examples[0].description == "example: 1" + assert docstring.examples[1].description == "long example\n\nmore here" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse("Args:") + + with pytest.raises(ParseError): + parse("Args:\n herp derp") + + +def test_unknown_meta() -> None: + """Test parsing unknown meta.""" + docstring = parse( + """Short desc + + Unknown 0: + title0: content0 + + Args: + arg0: desc0 + arg1: desc1 + + Unknown1: + title1: content1 + + Unknown2: + title2: content2 + """ + ) + + assert docstring.params[0].arg_name == "arg0" + assert docstring.params[0].description == "desc0" + assert docstring.params[1].arg_name == "arg1" + assert docstring.params[1].description == "desc1" + + +def test_broken_arguments() -> None: + """Test parsing broken arguments.""" + with pytest.raises(ParseError): + parse( + """This is a test + + Args: + param - poorly formatted + """ + ) + + +def test_empty_example() -> None: + """Test parsing empty examples section.""" + docstring = parse( + """Short description + + Example: + + Raises: + IOError: some error + """ + ) + + assert len(docstring.examples) == 1 + assert docstring.examples[0].args == ["examples"] + assert docstring.examples[0].description == "" + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + Meta: + asd + """, + "Short description\nMeta:\n asd", + ), + ( + """ + Short description + Long description + Meta: + asd + """, + "Short description\nLong description\nMeta:\n asd", + ), + ( + """ + Short description + First line + Second line + Meta: + asd + """, + "Short description\n" + "First line\n" + " Second line\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + Meta: + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + + Meta: + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "Meta:\n" + " asd", + ), + ( + """ + Short description + + Meta: + asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "Meta:\n" + " asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + 
Short description + + Meta1: + asd + 1 + 2 + 3 + Meta2: + herp + Meta3: + derp + """, + "Short description\n" + "\n" + "Meta1:\n" + " asd\n" + " 1\n" + " 2\n" + " 3\n" + "Meta2:\n" + " herp\n" + "Meta3:\n" + " derp", + ), + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name: description 1\n" + " priority (int): description 2\n" + " sender (str?): description 3\n" + " message (str?): description 4, defaults to 'hello'\n" + " multiline (str?): long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + Raises: + ValueError: description + """, + "Short description\nRaises:\n ValueError: description", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert compose(parse(source)) == expected + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name: description 1\n" + " priority (int): description 2\n" + " sender (str, optional): description 3\n" + " message (str, optional): description 4, defaults to 'hello'\n" + " multiline (str, optional): long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_clean(source: str, expected: str) -> None: + """Test compose in clean mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.CLEAN) + == expected + ) + + +@pytest.mark.parametrize( + "source, expected", + [ + ( + """ + Short description + + Args: + name: description 1 + priority (int): description 2 + sender (str, optional): description 3 + message (str, optional): description 4, defaults to 'hello' + multiline (str?): + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Args:\n" + " name:\n" + " description 1\n" + " priority (int):\n" + " description 2\n" + " sender (str, optional):\n" + " description 3\n" + " message (str, optional):\n" + " description 4, defaults to 'hello'\n" + " multiline (str, optional):\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ], +) +def test_compose_expanded(source: str, expected: str) -> None: + """Test compose in expanded mode.""" + assert ( + compose(parse(source), rendering_style=RenderingStyle.EXPANDED) + == expected + ) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_numpydoc.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_numpydoc.py new file mode 100644 index 0000000000000000000000000000000000000000..2394c61aa9a2daee04d1d3a6d56d7f53e3da0c9a --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_numpydoc.py @@ -0,0 +1,1088 @@ +"""Tests for numpydoc-style docstring routines.""" + +import typing as T + +import pytest +from docstring_parser.numpydoc import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, 
expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc", + [ + ( + """ + Short description + Parameters + ---------- + asd + """, + "Short description", + None, + False, + False, + ), + ( + """ + Short description + Long description + Parameters + ---------- + asd + """, + "Short description", + "Long description", + False, + False, + ), + ( + """ + Short description + First line + Second line + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + False, + False, + ), + ( + """ + Short description + + First line + Second line + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + True, + False, + ), + ( + """ + Short description + + First line + Second line + + Parameters + ---------- + asd + """, + "Short description", + "First line\n Second line", + True, + True, + ), + ( + """ + Parameters + ---------- + asd + """, + None, + None, + False, + False, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + Parameters + ---------- + spam + asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == 
["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +@pytest.mark.parametrize( + "source, expected_is_optional, expected_type_name, expected_default", + [ + ( + """ + Parameters + ---------- + arg1 : int + The first arg + """, + False, + "int", + None, + ), + ( + """ + Parameters + ---------- + arg2 : str + The second arg + """, + False, + "str", + None, + ), + ( + """ + Parameters + ---------- + arg3 : float, optional + The third arg. Default is 1.0. + """, + True, + "float", + "1.0", + ), + ( + """ + Parameters + ---------- + arg4 : Optional[Dict[str, Any]], optional + The fourth arg. Defaults to None + """, + True, + "Optional[Dict[str, Any]]", + "None", + ), + ( + """ + Parameters + ---------- + arg5 : str, optional + The fifth arg. Default: DEFAULT_ARGS + """, + True, + "str", + "DEFAULT_ARGS", + ), + ( + """ + Parameters + ---------- + parameter_without_default : int + The parameter_without_default is required. + """, + False, + "int", + None, + ), + ], +) +def test_default_args( + source: str, + expected_is_optional: bool, + expected_type_name: T.Optional[str], + expected_default: T.Optional[str], +) -> None: + """Test parsing default arguments.""" + docstring = parse(source) + assert docstring is not None + assert len(docstring.params) == 1 + + arg1 = docstring.params[0] + assert arg1.is_optional == expected_is_optional + assert arg1.type_name == expected_type_name + assert arg1.default == expected_default + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + Parameters + ---------- + spam + asd + 1 + 2 + 3 + + Raises + ------ + bla + herp + yay + derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["param", "spam"] + assert docstring.meta[0].arg_name == "spam" + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["raises", "bla"] + assert docstring.meta[1].type_name == "bla" + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["raises", "yay"] + assert docstring.meta[2].type_name == "yay" + assert docstring.meta[2].description == "derp" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Parameters + ---------- + name + description 1 + priority : int + description 2 + sender : str, optional + description 3 + ratio : Optional[float], optional + description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short 
description + + Parameters + ---------- + name + description 1 + with multi-line text + priority : int + description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_attributes() -> None: + """Test parsing attributes.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + Attributes + ---------- + name + description 1 + priority : int + description 2 + sender : str, optional + description 3 + ratio : Optional[float], optional + description 4 + """ + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[3].arg_name == "ratio" + assert docstring.params[3].type_name == "Optional[float]" + assert docstring.params[3].description == "description 4" + assert docstring.params[3].is_optional + + docstring = parse( + """ + Short description + + Attributes + ---------- + name + description 1 + with multi-line text + priority : int + description 2 + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == ( + "description 1\nwith multi-line text" + ) + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + + +def test_other_params() -> None: + """Test parsing other parameters.""" + docstring = parse( + """ + Short description + Other Parameters + ---------------- + only_seldom_used_keywords : type, optional + Explanation + common_parameters_listed_above : type, optional + Explanation + """ + ) + assert len(docstring.meta) == 2 + assert docstring.meta[0].args == [ + "other_param", + "only_seldom_used_keywords", + ] + assert docstring.meta[0].arg_name == "only_seldom_used_keywords" + assert docstring.meta[0].type_name == "type" + assert docstring.meta[0].is_optional + assert docstring.meta[0].description == "Explanation" + + assert docstring.meta[1].args == [ + "other_param", + "common_parameters_listed_above", + ] + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + Yields + ------ + int + description + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["yields"] + assert docstring.meta[0].type_name == "int" + assert docstring.meta[0].description == "description" + assert docstring.meta[0].return_name is None + assert docstring.meta[0].is_generator + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + 
) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + Returns + ------- + type + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "type" + assert docstring.returns.description is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + int + description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Returns + ------- + Optional[Mapping[str, List[int]]] + A description: with a colon + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "Optional[Mapping[str, List[int]]]" + assert docstring.returns.description == "A description: with a colon" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + int + description + with much text + + even some spacing + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ( + "description\nwith much text\n\neven some spacing" + ) + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + Returns + ------- + a : int + description for a + b : str + description for b + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == ("description for a") + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 2 + assert docstring.many_returns[0].type_name == "int" + assert docstring.many_returns[0].description == "description for a" + assert docstring.many_returns[0].return_name == "a" + assert docstring.many_returns[1].type_name == "str" + assert docstring.many_returns[1].description == "description for b" + assert docstring.many_returns[1].return_name == "b" + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + Raises + ------ + ValueError + description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_warns() -> None: + """Test parsing warns.""" + docstring = parse( + """ + Short description + Warns + ----- + UserWarning + description + """ + ) + assert len(docstring.meta) == 1 + assert docstring.meta[0].type_name == "UserWarning" + assert docstring.meta[0].description == "description" + + +def test_simple_sections() -> None: + """Test parsing simple sections.""" + docstring = parse( + """ + Short description + + See Also + -------- + something : some thing you can also see + actually, anything can go in this section + + Warnings + -------- + Here be dragons + + Notes + ----- + None of this is real + + 
References + ---------- + Cite the relevant literature, e.g. [1]_. You may also cite these + references in the notes section above. + + .. [1] O. McNoleg, "The integration of GIS, remote sensing, + expert systems and adaptive co-kriging for environmental habitat + modelling of the Highland Haggis using object-oriented, fuzzy-logic + and neural-network techniques," Computers & Geosciences, vol. 22, + pp. 585-588, 1996. + """ + ) + assert len(docstring.meta) == 4 + assert docstring.meta[0].args == ["see_also"] + assert docstring.meta[0].description == ( + "something : some thing you can also see\n" + "actually, anything can go in this section" + ) + + assert docstring.meta[1].args == ["warnings"] + assert docstring.meta[1].description == "Here be dragons" + + assert docstring.meta[2].args == ["notes"] + assert docstring.meta[2].description == "None of this is real" + + assert docstring.meta[3].args == ["references"] + + +@pytest.mark.parametrize( + "source, expected_results", + [ + ( + "Description\nExamples\n--------\nlong example\n\nmore here", + [ + (None, "long example\n\nmore here"), + ], + ), + ( + "Description\nExamples\n--------\n>>> test", + [ + (">>> test", ""), + ], + ), + ( + "Description\nExamples\n--------\n>>> testa\n>>> testb", + [ + (">>> testa\n>>> testb", ""), + ], + ), + ( + "Description\nExamples\n--------\n>>> test1\ndesc1", + [ + (">>> test1", "desc1"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1a\n>>> test1b\ndesc1a\ndesc1b", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1\ndesc1\n>>> test2\ndesc2", + [ + (">>> test1", "desc1"), + (">>> test2", "desc2"), + ], + ), + ( + "Description\nExamples\n--------\n" + ">>> test1a\n>>> test1b\ndesc1a\ndesc1b\n" + ">>> test2a\n>>> test2b\ndesc2a\ndesc2b\n", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + (">>> test2a\n>>> test2b", "desc2a\ndesc2b"), + ], + ), + ( + "Description\nExamples\n--------\n" + " >>> test1a\n >>> test1b\n desc1a\n desc1b\n" + " >>> test2a\n >>> test2b\n desc2a\n desc2b\n", + [ + (">>> test1a\n>>> test1b", "desc1a\ndesc1b"), + (">>> test2a\n>>> test2b", "desc2a\ndesc2b"), + ], + ), + ], +) +def test_examples( + source, expected_results: T.List[T.Tuple[T.Optional[str], str]] +) -> None: + """Test parsing examples.""" + docstring = parse(source) + assert len(docstring.meta) == len(expected_results) + for meta, expected_result in zip(docstring.meta, expected_results): + assert meta.description == expected_result[1] + assert len(docstring.examples) == len(expected_results) + for example, expected_result in zip(docstring.examples, expected_results): + assert example.snippet == expected_result[0] + assert example.description == expected_result[1] + + +@pytest.mark.parametrize( + "source, expected_depr_version, expected_depr_desc", + [ + ( + "Short description\n\n.. deprecated:: 1.6.0\n This is busted!", + "1.6.0", + "This is busted!", + ), + ( + ( + "Short description\n\n" + ".. deprecated:: 1.6.0\n" + " This description has\n" + " multiple lines!" + ), + "1.6.0", + "This description has\nmultiple lines!", + ), + ("Short description\n\n.. deprecated:: 1.6.0", "1.6.0", None), + ( + "Short description\n\n.. 
deprecated::\n No version!", + None, + "No version!", + ), + ], +) +def test_deprecation( + source: str, + expected_depr_version: T.Optional[str], + expected_depr_desc: T.Optional[str], +) -> None: + """Test parsing deprecation notes.""" + docstring = parse(source) + + assert docstring.deprecation is not None + assert docstring.deprecation.version == expected_depr_version + assert docstring.deprecation.description == expected_depr_desc + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", ""), + ("\n", ""), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ( + "Short description\n\nLong description", + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + """, + "Short description\n\nLong description", + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description\n\nLong description\nSecond line", + ), + ( + "Short description\nLong description", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + """, + "Short description\nLong description", + ), + ( + "\nShort description\nLong description\n", + "Short description\nLong description", + ), + ( + """ + Short description + Long description + Second line + """, + "Short description\nLong description\nSecond line", + ), + ( + """ + Short description + Meta: + ----- + asd + """, + "Short description\nMeta:\n-----\n asd", + ), + ( + """ + Short description + Long description + Meta: + ----- + asd + """, + "Short description\n" + "Long description\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + First line + Second line + Meta: + ----- + asd + """, + "Short description\n" + "First line\n" + " Second line\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + Meta: + ----- + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + First line + Second line + + Meta: + ----- + asd + """, + "Short description\n" + "\n" + "First line\n" + " Second line\n" + "\n" + "Meta:\n" + "-----\n" + " asd", + ), + ( + """ + Short description + + Meta: + ----- + asd + 1 + 2 + 3 + """, + "Short description\n" + "\n" + "Meta:\n" + "-----\n" + " asd\n" + " 1\n" + " 2\n" + " 3", + ), + ( + """ + Short description + + Meta1: + ------ + asd + 1 + 2 + 3 + Meta2: + ------ + herp + Meta3: + ------ + derp + """, + "Short description\n" + "\n" + "Meta1:\n" + "------\n" + " asd\n" + " 1\n" + " 2\n" + " 3\n" + "Meta2:\n" + "------\n" + " herp\n" + "Meta3:\n" + "------\n" + " derp", + ), + ( + """ + Short description + + Parameters: + ----------- + name + description 1 + priority: int + description 2 + sender: str, optional + description 3 + message: str, optional + description 4, defaults to 'hello' + multiline: str, optional + long description 5, + defaults to 'bye' + """, + "Short description\n" + "\n" + "Parameters:\n" + "-----------\n" + " name\n" + " description 1\n" + " priority: int\n" + " description 2\n" + " sender: str, optional\n" + " description 3\n" + " message: str, optional\n" + " description 4, defaults to 'hello'\n" + " multiline: str, optional\n" + " long description 5,\n" + " defaults to 'bye'", + ), + ( + """ + Short description + Raises: + ------- + ValueError + description + """, + "Short description\n" + "Raises:\n" + "-------\n" + " ValueError\n" + " 
description", + ), + ( + """ + Description + Examples: + -------- + >>> test1a + >>> test1b + desc1a + desc1b + >>> test2a + >>> test2b + desc2a + desc2b + """, + "Description\n" + "Examples:\n" + "--------\n" + ">>> test1a\n" + ">>> test1b\n" + "desc1a\n" + "desc1b\n" + ">>> test2a\n" + ">>> test2b\n" + "desc2a\n" + "desc2b", + ), + ], +) +def test_compose(source: str, expected: str) -> None: + """Test compose in default mode.""" + assert compose(parse(source)) == expected diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parse_from_object.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parse_from_object.py new file mode 100644 index 0000000000000000000000000000000000000000..0dcba3de6dedec2381ed22ef483e4d6c1a9eeab4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parse_from_object.py @@ -0,0 +1,110 @@ +"""Tests for parse_from_object function and attribute docstrings.""" + +from unittest.mock import patch + +from docstring_parser import parse_from_object + +module_attr: int = 1 +"""Description for module_attr""" + + +def test_from_module_attribute_docstrings() -> None: + """Test the parse of attribute docstrings from a module.""" + from . import test_parse_from_object # pylint: disable=C0415,W0406 + + docstring = parse_from_object(test_parse_from_object) + + assert "parse_from_object" in docstring.short_description + assert len(docstring.params) == 1 + assert docstring.params[0].arg_name == "module_attr" + assert docstring.params[0].type_name == "int" + assert docstring.params[0].description == "Description for module_attr" + + +def test_from_class_attribute_docstrings() -> None: + """Test the parse of attribute docstrings from a class.""" + + class StandardCase: + """Short description + Long description + """ + + attr_one: str + """Description for attr_one""" + attr_two: bool = False + """Description for attr_two""" + + docstring = parse_from_object(StandardCase) + + assert docstring.short_description == "Short description" + assert docstring.long_description == "Long description" + assert docstring.description == "Short description\nLong description" + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "attr_one" + assert docstring.params[0].type_name == "str" + assert docstring.params[0].description == "Description for attr_one" + assert docstring.params[1].arg_name == "attr_two" + assert docstring.params[1].type_name == "bool" + assert docstring.params[1].description == "Description for attr_two" + + +def test_from_class_attribute_docstrings_without_type() -> None: + """Test the parse of untyped attribute docstrings.""" + + class WithoutType: # pylint: disable=missing-class-docstring + attr_one = "value" + """Description for attr_one""" + + docstring = parse_from_object(WithoutType) + + assert docstring.short_description is None + assert docstring.long_description is None + assert docstring.description is None + assert len(docstring.params) == 1 + assert docstring.params[0].arg_name == "attr_one" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "Description for attr_one" + + +def test_from_class_without_source() -> None: + """Test the parse of class when source is unavailable.""" + + class WithoutSource: + """Short description""" + + attr_one: str + """Description for attr_one""" + + with patch( + "inspect.getsource", side_effect=OSError("could not get source code") + ): + docstring = parse_from_object(WithoutSource) + + assert 
docstring.short_description == "Short description" + assert docstring.long_description is None + assert docstring.description == "Short description" + assert len(docstring.params) == 0 + + +def test_from_function() -> None: + """Test the parse of a function docstring.""" + + def a_function(param1: str, param2: int = 2): + """Short description + Args: + param1: Description for param1 + param2: Description for param2 + """ + return f"{param1} {param2}" + + docstring = parse_from_object(a_function) + + assert docstring.short_description == "Short description" + assert docstring.description == "Short description" + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "param1" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "Description for param1" + assert docstring.params[1].arg_name == "param2" + assert docstring.params[1].type_name is None + assert docstring.params[1].description == "Description for param2" diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parser.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..73b8c9c602563385888d939a7986574ae21b1901 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_parser.py @@ -0,0 +1,223 @@ +"""Tests for generic docstring routines.""" + +import pytest +from docstring_parser.common import DocstringStyle, ParseError +from docstring_parser.parser import parse + + +def test_rest() -> None: + """Test ReST-style parser autodetection.""" + docstring = parse( + """ + Short description + + Long description + + Causing people to indent: + + A lot sometimes + + :param spam: spam desc + :param int bla: bla desc + :param str yay: + :raises ValueError: exc desc + :returns tuple: ret desc + """ + ) + + assert docstring.style == DocstringStyle.REST + assert docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + " A lot sometimes" + ) + assert len(docstring.params) == 3 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "" + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" + assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_google() -> None: + """Test Google-style parser autodetection.""" + docstring = parse( + """Short description + + Long description + + Causing people to indent: + + A lot sometimes + + Args: + spam: spam desc + bla (int): bla desc + yay (str): + + Raises: + ValueError: exc desc + + Returns: + tuple: ret desc + """ + ) + + assert docstring.style == DocstringStyle.GOOGLE + assert 
docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + "    A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + "    A lot sometimes" + ) + assert len(docstring.params) == 3 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "" + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" + assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_numpydoc() -> None: + """Test numpydoc-style parser autodetection.""" + docstring = parse( + """Short description + + Long description + + Causing people to indent: + + A lot sometimes + + Parameters + ---------- + spam + spam desc + bla : int + bla desc + yay : str + + Raises + ------ + ValueError + exc desc + + Other Parameters + ---------------- + this_guy : int, optional + you know him + + Returns + ------- + tuple + ret desc + + See Also + -------- + multiple lines... + something else? + + Warnings + -------- + multiple lines... + none of this is real! + """ + ) + + assert docstring.style == DocstringStyle.NUMPYDOC + assert docstring.short_description == "Short description" + assert docstring.long_description == ( + "Long description\n\n" + "Causing people to indent:\n\n" + "    A lot sometimes" + ) + assert docstring.description == ( + "Short description\n\n" + "Long description\n\n" + "Causing people to indent:\n\n" + "    A lot sometimes" + ) + assert len(docstring.params) == 4 + assert docstring.params[0].arg_name == "spam" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "spam desc" + assert docstring.params[1].arg_name == "bla" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "bla desc" + assert docstring.params[2].arg_name == "yay" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description is None + assert docstring.params[3].arg_name == "this_guy" + assert docstring.params[3].type_name == "int" + assert docstring.params[3].is_optional + assert docstring.params[3].description == "you know him" + + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "exc desc" + assert docstring.returns is not None + assert docstring.returns.type_name == "tuple" + assert docstring.returns.description == "ret desc" + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_autodetection_error_detection() -> None: + """Test autodetection for the case where one of the parsers throws an error + and another one succeeds.
+ """ + source = """ + Does something useless + + :param 3 + 3 a: a param + """ + + with pytest.raises(ParseError): + # assert that one of the parsers does raise + parse(source, DocstringStyle.REST) + + # assert that autodetection still works + docstring = parse(source) + + assert docstring + assert docstring.style == DocstringStyle.GOOGLE diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_rest.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_rest.py new file mode 100644 index 0000000000000000000000000000000000000000..411c27720962765fdad425f3347ad14fd60c8478 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_rest.py @@ -0,0 +1,542 @@ +"""Tests for ReST-style docstring routines.""" + +import typing as T + +import pytest +from docstring_parser.common import ParseError, RenderingStyle +from docstring_parser.rest import compose, parse + + +@pytest.mark.parametrize( + "source, expected", + [ + ("", None), + ("\n", None), + ("Short description", "Short description"), + ("\nShort description\n", "Short description"), + ("\n Short description\n", "Short description"), + ], +) +def test_short_description(source: str, expected: str) -> None: + """Test parsing short description.""" + docstring = parse(source) + assert docstring.short_description == expected + assert docstring.description == expected + assert docstring.long_description is None + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, expected_blank", + [ + ( + "Short description\n\nLong description", + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + """, + "Short description", + "Long description", + True, + ), + ( + """ + Short description + + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + True, + ), + ( + "Short description\nLong description", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + """, + "Short description", + "Long description", + False, + ), + ( + "\nShort description\nLong description\n", + "Short description", + "Long description", + False, + ), + ( + """ + Short description + Long description + Second line + """, + "Short description", + "Long description\nSecond line", + False, + ), + ], +) +def test_long_description( + source: str, + expected_short_desc: str, + expected_long_desc: str, + expected_blank: bool, +) -> None: + """Test parsing long description.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank + assert not docstring.meta + + +@pytest.mark.parametrize( + "source, expected_short_desc, expected_long_desc, " + "expected_blank_short_desc, expected_blank_long_desc, " + "expected_full_desc", + [ + ( + """ + Short description + :meta: asd + """, + "Short description", + None, + False, + False, + "Short description", + ), + ( + """ + Short description + Long description + :meta: asd + """, + "Short description", + "Long description", + False, + False, + "Short description\nLong description", + ), + ( + """ + Short description + First line + Second line + :meta: asd + """, + "Short description", + "First line\n Second line", + False, + False, + "Short description\nFirst line\n Second line", + ), + ( + """ + Short description + + First line + Second line 
+ :meta: asd + """, + "Short description", + "First line\n Second line", + True, + False, + "Short description\n\nFirst line\n Second line", + ), + ( + """ + Short description + + First line + Second line + + :meta: asd + """, + "Short description", + "First line\n Second line", + True, + True, + "Short description\n\nFirst line\n Second line", + ), + ( + """ + :meta: asd + """, + None, + None, + False, + False, + None, + ), + ], +) +def test_meta_newlines( + source: str, + expected_short_desc: T.Optional[str], + expected_long_desc: T.Optional[str], + expected_blank_short_desc: bool, + expected_blank_long_desc: bool, + expected_full_desc: T.Optional[str], +) -> None: + """Test parsing newlines around description sections.""" + docstring = parse(source) + assert docstring.short_description == expected_short_desc + assert docstring.long_description == expected_long_desc + assert docstring.blank_after_short_description == expected_blank_short_desc + assert docstring.blank_after_long_description == expected_blank_long_desc + assert docstring.description == expected_full_desc + assert len(docstring.meta) == 1 + + +def test_meta_with_multiline_description() -> None: + """Test parsing multiline meta documentation.""" + docstring = parse( + """ + Short description + + :meta: asd + 1 + 2 + 3 + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + + +def test_multiple_meta() -> None: + """Test parsing multiple meta.""" + docstring = parse( + """ + Short description + + :meta1: asd + 1 + 2 + 3 + :meta2: herp + :meta3: derp + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 3 + assert docstring.meta[0].args == ["meta1"] + assert docstring.meta[0].description == "asd\n1\n 2\n3" + assert docstring.meta[1].args == ["meta2"] + assert docstring.meta[1].description == "herp" + assert docstring.meta[2].args == ["meta3"] + assert docstring.meta[2].description == "derp" + + +def test_meta_with_args() -> None: + """Test parsing meta with additional arguments.""" + docstring = parse( + """ + Short description + + :meta ene due rabe: asd + """ + ) + assert docstring.short_description == "Short description" + assert len(docstring.meta) == 1 + assert docstring.meta[0].args == ["meta", "ene", "due", "rabe"] + assert docstring.meta[0].description == "asd" + + +def test_params() -> None: + """Test parsing params.""" + docstring = parse("Short description") + assert len(docstring.params) == 0 + + docstring = parse( + """ + Short description + + :param name: description 1 + :param int priority: description 2 + :param str? sender: description 3 + :param str? message: description 4, defaults to 'hello' + :param str? 
multiline: long description 5, + defaults to 'bye' + """ + ) + assert len(docstring.params) == 5 + assert docstring.params[0].arg_name == "name" + assert docstring.params[0].type_name is None + assert docstring.params[0].description == "description 1" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + assert docstring.params[1].arg_name == "priority" + assert docstring.params[1].type_name == "int" + assert docstring.params[1].description == "description 2" + assert not docstring.params[1].is_optional + assert docstring.params[1].default is None + assert docstring.params[2].arg_name == "sender" + assert docstring.params[2].type_name == "str" + assert docstring.params[2].description == "description 3" + assert docstring.params[2].is_optional + assert docstring.params[2].default is None + assert docstring.params[3].arg_name == "message" + assert docstring.params[3].type_name == "str" + assert ( + docstring.params[3].description == "description 4, defaults to 'hello'" + ) + assert docstring.params[3].is_optional + assert docstring.params[3].default == "'hello'" + assert docstring.params[4].arg_name == "multiline" + assert docstring.params[4].type_name == "str" + assert ( + docstring.params[4].description + == "long description 5,\ndefaults to 'bye'" + ) + assert docstring.params[4].is_optional + assert docstring.params[4].default == "'bye'" + + docstring = parse( + """ + Short description + + :param a: description a + :type a: int + :param int b: description b + """ + ) + assert len(docstring.params) == 2 + assert docstring.params[0].arg_name == "a" + assert docstring.params[0].type_name == "int" + assert docstring.params[0].description == "description a" + assert docstring.params[0].default is None + assert not docstring.params[0].is_optional + + +def test_returns() -> None: + """Test parsing returns.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + :returns: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + docstring = parse( + """ + Short description + :returns int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + docstring = parse( + """ + Short description + :returns: description + :rtype: int + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert not docstring.returns.is_generator + assert docstring.many_returns == [docstring.returns] + + +def test_yields() -> None: + """Test parsing yields.""" + docstring = parse( + """ + Short description + """ + ) + assert docstring.returns is None + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 0 + + docstring = parse( + """ + Short description + :yields: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name is None + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + assert 
docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + docstring = parse( + """ + Short description + :yields int: description + """ + ) + assert docstring.returns is not None + assert docstring.returns.type_name == "int" + assert docstring.returns.description == "description" + assert docstring.returns.is_generator + assert docstring.many_returns is not None + assert len(docstring.many_returns) == 1 + assert docstring.many_returns[0] == docstring.returns + + +def test_raises() -> None: + """Test parsing raises.""" + docstring = parse( + """ + Short description + """ + ) + assert len(docstring.raises) == 0 + + docstring = parse( + """ + Short description + :raises: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name is None + assert docstring.raises[0].description == "description" + + docstring = parse( + """ + Short description + :raises ValueError: description + """ + ) + assert len(docstring.raises) == 1 + assert docstring.raises[0].type_name == "ValueError" + assert docstring.raises[0].description == "description" + + +def test_broken_meta() -> None: + """Test parsing broken meta.""" + with pytest.raises(ParseError): + parse(":") + + with pytest.raises(ParseError): + parse(":param herp derp") + + with pytest.raises(ParseError): + parse(":param: invalid") + + with pytest.raises(ParseError): + parse(":param with too many args: desc") + + # these should not raise any errors + parse(":sthstrange: desc") + + +def test_deprecation() -> None: + """Test parsing deprecation notes.""" + docstring = parse(":deprecation: 1.1.0 this function will be removed") + assert docstring.deprecation is not None + assert docstring.deprecation.version == "1.1.0" + assert docstring.deprecation.description == "this function will be removed" + + docstring = parse(":deprecation: this function will be removed") + assert docstring.deprecation is not None + assert docstring.deprecation.version is None + assert docstring.deprecation.description == "this function will be removed" + + +@pytest.mark.parametrize( + "rendering_style, expected", + [ + ( + RenderingStyle.COMPACT, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param int foo: a description\n" + ":param int bar: another description\n" + ":returns float: a return", + ), + ( + RenderingStyle.CLEAN, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param int foo: a description\n" + ":param int bar: another description\n" + ":returns float: a return", + ), + ( + RenderingStyle.EXPANDED, + "Short description.\n" + "\n" + "Long description.\n" + "\n" + ":param foo:\n" + " a description\n" + ":type foo: int\n" + ":param bar:\n" + " another description\n" + ":type bar: int\n" + ":returns:\n" + " a return\n" + ":rtype: float", + ), + ], +) +def test_compose(rendering_style: RenderingStyle, expected: str) -> None: + """Test compose""" + + docstring = parse( + """ + Short description. + + Long description. 
+ + :param int foo: a description + :param int bar: another description + :return float: a return + """ + ) + assert compose(docstring, rendering_style=rendering_style) == expected + + +def test_short_rtype() -> None: + """Test abbreviated docstring with only return type information.""" + string = "Short description.\n\n:rtype: float" + docstring = parse(string) + rendering_style = RenderingStyle.EXPANDED + assert compose(docstring, rendering_style=rendering_style) == string diff --git a/py311/lib/python3.11/site-packages/docstring_parser/tests/test_util.py b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..00bc2f985f35805c3bf10766bb4f80ff0d0a3ee5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/tests/test_util.py @@ -0,0 +1,64 @@ +"""Test for utility functions.""" + +from docstring_parser.common import DocstringReturns +from docstring_parser.util import combine_docstrings + + +def test_combine_docstrings() -> None: + """Test combine_docstrings wrapper.""" + + def fun1(arg_a, arg_b, arg_c, arg_d): + """short_description: fun1 + + :param arg_a: fun1 + :param arg_b: fun1 + :return: fun1 + """ + assert arg_a and arg_b and arg_c and arg_d + + def fun2(arg_b, arg_c, arg_d, arg_e): + """short_description: fun2 + + long_description: fun2 + + :param arg_b: fun2 + :param arg_c: fun2 + :param arg_e: fun2 + """ + assert arg_b and arg_c and arg_d and arg_e + + @combine_docstrings(fun1, fun2) + def decorated1(arg_a, arg_b, arg_c, arg_d, arg_e, arg_f): + """ + :param arg_e: decorated + :param arg_f: decorated + """ + assert arg_a and arg_b and arg_c and arg_d and arg_e and arg_f + + assert decorated1.__doc__ == ( + "short_description: fun2\n" + "\n" + "long_description: fun2\n" + "\n" + ":param arg_a: fun1\n" + ":param arg_b: fun1\n" + ":param arg_c: fun2\n" + ":param arg_e: fun2\n" + ":param arg_f: decorated\n" + ":returns: fun1" + ) + + @combine_docstrings(fun1, fun2, exclude=[DocstringReturns]) + def decorated2(arg_a, arg_b, arg_c, arg_d, arg_e, arg_f): + assert arg_a and arg_b and arg_c and arg_d and arg_e and arg_f + + assert decorated2.__doc__ == ( + "short_description: fun2\n" + "\n" + "long_description: fun2\n" + "\n" + ":param arg_a: fun1\n" + ":param arg_b: fun1\n" + ":param arg_c: fun2\n" + ":param arg_e: fun2" + ) diff --git a/py311/lib/python3.11/site-packages/docstring_parser/util.py b/py311/lib/python3.11/site-packages/docstring_parser/util.py new file mode 100644 index 0000000000000000000000000000000000000000..c5be2f5728d40522b23a02a72b1db8ce0da82cb4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/docstring_parser/util.py @@ -0,0 +1,145 @@ +"""Utility functions for working with docstrings.""" + +import typing as T +from collections import ChainMap +from inspect import Signature +from itertools import chain + +from .common import ( + DocstringMeta, + DocstringParam, + DocstringReturns, + DocstringStyle, + RenderingStyle, +) +from .parser import compose, parse + +_Func = T.Callable[..., T.Any] + +assert DocstringReturns # used in docstring + + +def combine_docstrings( + *others: _Func, + exclude: T.Iterable[T.Type[DocstringMeta]] = (), + style: DocstringStyle = DocstringStyle.AUTO, + rendering_style: RenderingStyle = RenderingStyle.COMPACT, +) -> _Func: + """A function decorator that parses the docstrings from `others`, + programmatically combines them with the parsed docstring of the decorated + function, and replaces the docstring of the decorated function with the + 
composed result. Only parameters that are part of the decorated function's + signature are included in the combined docstring. When multiple sources for + a parameter or docstring metadata exist, the decorator will first + default to the wrapped function's value (when available) and otherwise use + the rightmost definition from ``others``. The exception is parameter + descriptions, where the leftmost definition takes precedence (note + ``:param e: fun2`` in the example below). + + The following example illustrates its usage: + + >>> def fun1(a, b, c, d): + ... '''short_description: fun1 + ... + ... :param a: fun1 + ... :param b: fun1 + ... :return: fun1 + ... ''' + >>> def fun2(b, c, d, e): + ... '''short_description: fun2 + ... + ... long_description: fun2 + ... + ... :param b: fun2 + ... :param c: fun2 + ... :param e: fun2 + ... ''' + >>> @combine_docstrings(fun1, fun2) + ... def decorated(a, b, c, d, e, f): + ... ''' + ... :param e: decorated + ... :param f: decorated + ... ''' + >>> print(decorated.__doc__) + short_description: fun2 + + long_description: fun2 + + :param a: fun1 + :param b: fun1 + :param c: fun2 + :param e: fun2 + :param f: decorated + :returns: fun1 + >>> @combine_docstrings(fun1, fun2, exclude=[DocstringReturns]) + ... def decorated(a, b, c, d, e, f): pass + >>> print(decorated.__doc__) + short_description: fun2 + + long_description: fun2 + + :param a: fun1 + :param b: fun1 + :param c: fun2 + :param e: fun2 + + :param others: callables from which to parse docstrings. + :param exclude: an iterable of ``DocstringMeta`` subclasses to exclude when + combining docstrings. + :param style: style of the composed docstring. The default will infer the + style from the decorated function. + :param rendering_style: The rendering style used to compose a docstring. + :return: the decorated function with a modified docstring. + """ + + def wrapper(func: _Func) -> _Func: + sig = Signature.from_callable(func) + + comb_doc = parse(func.__doc__ or "") + docs = [parse(other.__doc__ or "") for other in others] + [comb_doc] + params = dict( + ChainMap( + *( + {param.arg_name: param for param in doc.params} + for doc in docs + ) + ) + ) + + for doc in reversed(docs): + if not doc.short_description: + continue + comb_doc.short_description = doc.short_description + comb_doc.blank_after_short_description = ( + doc.blank_after_short_description + ) + break + + for doc in reversed(docs): + if not doc.long_description: + continue + comb_doc.long_description = doc.long_description + comb_doc.blank_after_long_description = ( + doc.blank_after_long_description + ) + break + + combined = {} + for doc in docs: + metas = {} + for meta in doc.meta: + meta_type = type(meta) + if meta_type in exclude: + continue + metas.setdefault(meta_type, []).append(meta) + for meta_type, meta in metas.items(): + combined[meta_type] = meta + + combined[DocstringParam] = [ + params[name] for name in sig.parameters if name in params + ] + comb_doc.meta = list(chain(*combined.values())) + func.__doc__ = compose( + comb_doc, style=style, rendering_style=rendering_style + ) + return func + + return wrapper diff --git a/py311/lib/python3.11/site-packages/fontTools/__init__.py b/py311/lib/python3.11/site-packages/fontTools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..953cda045eea689a36b479be31a32be421b6494b --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/__init__.py @@ -0,0 +1,8 @@ +import logging +from fontTools.misc.loggingTools import configLogger + +log = logging.getLogger(__name__) + +version = __version__ = "4.61.1" + +__all__ = ["version", "log", "configLogger"] diff --git 
a/py311/lib/python3.11/site-packages/fontTools/__main__.py b/py311/lib/python3.11/site-packages/fontTools/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..7c74ad3c86e54cb7e9939ed2bf96aa59cc6dcd06 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/__main__.py @@ -0,0 +1,35 @@ +import sys + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # TODO Handle library-wide options. Eg.: + # --unicodedata + # --verbose / other logging stuff + + # TODO Allow a way to run arbitrary modules? Useful for setting + # library-wide options and calling another library. Eg.: + # + # $ fonttools --unicodedata=... fontmake ... + # + # This allows for a git-like command where thirdparty commands + # can be added. Should we just try importing the fonttools + # module first and try without if it fails? + + if len(sys.argv) < 2: + sys.argv.append("help") + if sys.argv[1] == "-h" or sys.argv[1] == "--help": + sys.argv[1] = "help" + mod = "fontTools." + sys.argv[1] + sys.argv[1] = sys.argv[0] + " " + sys.argv[1] + del sys.argv[0] + + import runpy + + runpy.run_module(mod, run_name="__main__") + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/py311/lib/python3.11/site-packages/fontTools/afmLib.py b/py311/lib/python3.11/site-packages/fontTools/afmLib.py new file mode 100644 index 0000000000000000000000000000000000000000..0aabf7f6356df7209ba15b4242cacc89cc558993 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/afmLib.py @@ -0,0 +1,439 @@ +"""Module for reading and writing AFM (Adobe Font Metrics) files. + +Note that this has been designed to read in AFM files generated by Fontographer +and has not been tested on many other files. In particular, it does not +implement the whole Adobe AFM specification [#f1]_ but it should read most +"common" AFM files. + +Here is an example of using `afmLib` to read, modify and write an AFM file: + + >>> from fontTools.afmLib import AFM + >>> f = AFM("Tests/afmLib/data/TestAFM.afm") + >>> + >>> # Accessing a pair gets you the kern value + >>> f[("V","A")] + -60 + >>> + >>> # Accessing a glyph name gets you metrics + >>> f["A"] + (65, 668, (8, -25, 660, 666)) + >>> # (charnum, width, bounding box) + >>> + >>> # Accessing an attribute gets you metadata + >>> f.FontName + 'TestFont-Regular' + >>> f.FamilyName + 'TestFont' + >>> f.Weight + 'Regular' + >>> f.XHeight + 500 + >>> f.Ascender + 750 + >>> + >>> # Attributes and items can also be set + >>> f[("A","V")] = -150 # Tighten kerning + >>> f.FontName = "TestFont Squished" + >>> + >>> # And the font written out again (remove the # in front) + >>> #f.write("testfont-squished.afm") + +.. rubric:: Footnotes + +.. [#f1] `Adobe Technote 5004 <https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5004.AFM_Spec.pdf>`_, + Adobe Font Metrics File Format Specification. 
+ +""" + +import re + +# every single line starts with a "word" +identifierRE = re.compile(r"^([A-Za-z]+).*") + +# regular expression to parse char lines +charRE = re.compile( + r"(-?\d+)" # charnum + r"\s*;\s*WX\s+" # ; WX + r"(-?\d+)" # width + r"\s*;\s*N\s+" # ; N + r"([.A-Za-z0-9_]+)" # charname + r"\s*;\s*B\s+" # ; B + r"(-?\d+)" # left + r"\s+" + r"(-?\d+)" # bottom + r"\s+" + r"(-?\d+)" # right + r"\s+" + r"(-?\d+)" # top + r"\s*;\s*" # ; +) + +# regular expression to parse kerning lines +kernRE = re.compile( + r"([.A-Za-z0-9_]+)" # leftchar + r"\s+" + r"([.A-Za-z0-9_]+)" # rightchar + r"\s+" + r"(-?\d+)" # value + r"\s*" +) + +# regular expressions to parse composite info lines of the form: +# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; +compositeRE = re.compile( + r"([.A-Za-z0-9_]+)" # char name + r"\s+" + r"(\d+)" # number of parts + r"\s*;\s*" +) +componentRE = re.compile( + r"PCC\s+" # PPC + r"([.A-Za-z0-9_]+)" # base char name + r"\s+" + r"(-?\d+)" # x offset + r"\s+" + r"(-?\d+)" # y offset + r"\s*;\s*" +) + +preferredAttributeOrder = [ + "FontName", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "IsFixedPitch", + "FontBBox", + "UnderlinePosition", + "UnderlineThickness", + "Version", + "Notice", + "EncodingScheme", + "CapHeight", + "XHeight", + "Ascender", + "Descender", +] + + +class error(Exception): + pass + + +class AFM(object): + _attrs = None + + _keywords = [ + "StartFontMetrics", + "EndFontMetrics", + "StartCharMetrics", + "EndCharMetrics", + "StartKernData", + "StartKernPairs", + "EndKernPairs", + "EndKernData", + "StartComposites", + "EndComposites", + ] + + def __init__(self, path=None): + """AFM file reader. + + Instantiating an object with a path name will cause the file to be opened, + read, and parsed. Alternatively the path can be left unspecified, and a + file can be parsed later with the :meth:`read` method.""" + self._attrs = {} + self._chars = {} + self._kerning = {} + self._index = {} + self._comments = [] + self._composites = {} + if path is not None: + self.read(path) + + def read(self, path): + """Opens, reads and parses a file.""" + lines = readlines(path) + for line in lines: + if not line.strip(): + continue + m = identifierRE.match(line) + if m is None: + raise error("syntax error in AFM file: " + repr(line)) + + pos = m.regs[1][1] + word = line[:pos] + rest = line[pos:].strip() + if word in self._keywords: + continue + if word == "C": + self.parsechar(rest) + elif word == "KPX": + self.parsekernpair(rest) + elif word == "CC": + self.parsecomposite(rest) + else: + self.parseattr(word, rest) + + def parsechar(self, rest): + m = charRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + charname = things[2] + del things[2] + charnum, width, l, b, r, t = (int(thing) for thing in things) + self._chars[charname] = charnum, width, (l, b, r, t) + + def parsekernpair(self, rest): + m = kernRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + leftchar, rightchar, value = things + value = int(value) + self._kerning[(leftchar, rightchar)] = value + + def parseattr(self, word, rest): + if word == "FontBBox": + l, b, r, t = [int(thing) for thing in rest.split()] + self._attrs[word] = l, b, r, t + elif word == "Comment": + self._comments.append(rest) + else: + try: + value = int(rest) + except (ValueError, OverflowError): + 
self._attrs[word] = rest + else: + self._attrs[word] = value + + def parsecomposite(self, rest): + m = compositeRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + charname = m.group(1) + ncomponents = int(m.group(2)) + rest = rest[m.regs[0][1] :] + components = [] + while True: + m = componentRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + basechar = m.group(1) + xoffset = int(m.group(2)) + yoffset = int(m.group(3)) + components.append((basechar, xoffset, yoffset)) + rest = rest[m.regs[0][1] :] + if not rest: + break + assert len(components) == ncomponents + self._composites[charname] = components + + def write(self, path, sep="\r"): + """Writes out an AFM font to the given path.""" + import time + + lines = [ + "StartFontMetrics 2.0", + "Comment Generated by afmLib; at %s" + % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))), + ] + + # write comments, assuming (possibly wrongly!) they should + # all appear at the top + for comment in self._comments: + lines.append("Comment " + comment) + + # write attributes, first the ones we know about, in + # a preferred order + attrs = self._attrs + for attr in preferredAttributeOrder: + if attr in attrs: + value = attrs[attr] + if attr == "FontBBox": + value = "%s %s %s %s" % value + lines.append(attr + " " + str(value)) + # then write the attributes we don't know about, + # in alphabetical order + items = sorted(attrs.items()) + for attr, value in items: + if attr in preferredAttributeOrder: + continue + lines.append(attr + " " + str(value)) + + # write char metrics + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [ + (charnum, (charname, width, box)) + for charname, (charnum, width, box) in self._chars.items() + ] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) + end up at the end of the list after sorting.""" + if a[0] == -1: + a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number + return a + + items.sort(key=myKey) + + for charnum, (charname, width, (l, b, r, t)) in items: + lines.append( + "C %d ; WX %d ; N %s ; B %d %d %d %d ;" + % (charnum, width, charname, l, b, r, t) + ) + lines.append("EndCharMetrics") + + # write kerning info + lines.append("StartKernData") + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) + for (leftchar, rightchar), value in items: + lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) + lines.append("EndKernPairs") + lines.append("EndKernData") + + if self._composites: + composites = sorted(self._composites.items()) + lines.append("StartComposites %s" % len(self._composites)) + for charname, components in composites: + line = "CC %s %s ;" % (charname, len(components)) + for basechar, xoffset, yoffset in components: + line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) + lines.append(line) + lines.append("EndComposites") + + lines.append("EndFontMetrics") + + writelines(path, lines, sep) + + def has_kernpair(self, pair): + """Returns `True` if the given glyph pair (specified as a tuple) exists + in the kerning dictionary.""" + return pair in self._kerning + + def kernpairs(self): + """Returns a list of all kern pairs in the kerning dictionary.""" + return list(self._kerning.keys()) + + def has_char(self, char): + """Returns `True` if the given glyph exists in the font.""" + return char in self._chars + + def chars(self): + """Returns a list of all glyph names in the font.""" + return 
list(self._chars.keys()) + + def comments(self): + """Returns all comments from the file.""" + return self._comments + + def addComment(self, comment): + """Adds a new comment to the file.""" + self._comments.append(comment) + + def addComposite(self, glyphName, components): + """Specifies that the glyph `glyphName` is made up of the given components. + The components list should be of the following form:: + + [ + (glyphname, xOffset, yOffset), + ... + ] + + """ + self._composites[glyphName] = components + + def __getattr__(self, attr): + if attr in self._attrs: + return self._attrs[attr] + else: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + # all attrs *not* starting with "_" are considered to be AFM keywords + if attr[:1] == "_": + self.__dict__[attr] = value + else: + self._attrs[attr] = value + + def __delattr__(self, attr): + # all attrs *not* starting with "_" are considered to be AFM keywords + if attr[:1] == "_": + try: + del self.__dict__[attr] + except KeyError: + raise AttributeError(attr) + else: + try: + del self._attrs[attr] + except KeyError: + raise AttributeError(attr) + + def __getitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, return the kernpair + return self._kerning[key] + else: + # return the metrics instead + return self._chars[key] + + def __setitem__(self, key, value): + if isinstance(key, tuple): + # key is a tuple, set kernpair + self._kerning[key] = value + else: + # set char metrics + self._chars[key] = value + + def __delitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, del kernpair + del self._kerning[key] + else: + # del char metrics + del self._chars[key] + + def __repr__(self): + if hasattr(self, "FullName"): + return "<AFM object for %s>" % self.FullName + else: + return "<AFM object at %x>" % id(self) + + +def readlines(path): + with open(path, "r", encoding="ascii") as f: + data = f.read() + return data.splitlines() + + +def writelines(path, lines, sep="\r"): + with open(path, "w", encoding="ascii", newline=sep) as f: + f.write("\n".join(lines) + "\n") + + +if __name__ == "__main__": + import EasyDialogs + + path = EasyDialogs.AskFileForOpen() + if path: + afm = AFM(path) + char = "A" + if afm.has_char(char): + print(afm[char]) # print charnum, width and boundingbox + pair = ("A", "V") + if afm.has_kernpair(pair): + print(afm[pair]) # print kerning value for pair + print(afm.Version) # various other afm entries have become attributes + print(afm.Weight) + # afm.comments() returns a list of all Comment lines found in the AFM + print(afm.comments()) + # print afm.chars() + # print afm.kernpairs() + print(afm) + afm.write(path + ".muck") diff --git a/py311/lib/python3.11/site-packages/fontTools/agl.py b/py311/lib/python3.11/site-packages/fontTools/agl.py new file mode 100644 index 0000000000000000000000000000000000000000..d6994628dd3a68f9592efd8c09b0bf56c2a67056 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/agl.py @@ -0,0 +1,5233 @@ +# -*- coding: utf-8 -*- +# The tables below are taken from +# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt +# and +# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt +""" +Interface to the Adobe Glyph List + +This module exists to convert glyph names from the Adobe Glyph List +to their Unicode equivalents. 
Example usage: + + >>> from fontTools.agl import toUnicode + >>> toUnicode("nahiragana") + 'な' + +It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from +Unicode codepoints to AGL names and vice versa: + + >>> import fontTools + >>> fontTools.agl.UV2AGL[ord("?")] + 'question' + >>> fontTools.agl.AGL2UV["wcircumflex"] + 373 + +This is used by fontTools when it has to construct glyph names for a font which +doesn't include any (e.g. format 3.0 post tables). +""" + +from fontTools.misc.textTools import tostr +import re + + +_aglText = """\ +# ----------------------------------------------------------- +# Copyright 2002-2019 Adobe (http://www.adobe.com/). +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# Neither the name of Adobe nor the names of its contributors +# may be used to endorse or promote products derived from this +# software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# ----------------------------------------------------------- +# Name: Adobe Glyph List +# Table version: 2.0 +# Date: September 20, 2002 +# URL: https://github.com/adobe-type-tools/agl-aglfn +# +# Format: two semicolon-delimited fields: +# (1) glyph name--upper/lowercase letters and digits +# (2) Unicode scalar value--four uppercase hexadecimal digits +# +A;0041 +AE;00C6 +AEacute;01FC +AEmacron;01E2 +AEsmall;F7E6 +Aacute;00C1 +Aacutesmall;F7E1 +Abreve;0102 +Abreveacute;1EAE +Abrevecyrillic;04D0 +Abrevedotbelow;1EB6 +Abrevegrave;1EB0 +Abrevehookabove;1EB2 +Abrevetilde;1EB4 +Acaron;01CD +Acircle;24B6 +Acircumflex;00C2 +Acircumflexacute;1EA4 +Acircumflexdotbelow;1EAC +Acircumflexgrave;1EA6 +Acircumflexhookabove;1EA8 +Acircumflexsmall;F7E2 +Acircumflextilde;1EAA +Acute;F6C9 +Acutesmall;F7B4 +Acyrillic;0410 +Adblgrave;0200 +Adieresis;00C4 +Adieresiscyrillic;04D2 +Adieresismacron;01DE +Adieresissmall;F7E4 +Adotbelow;1EA0 +Adotmacron;01E0 +Agrave;00C0 +Agravesmall;F7E0 +Ahookabove;1EA2 +Aiecyrillic;04D4 +Ainvertedbreve;0202 +Alpha;0391 +Alphatonos;0386 +Amacron;0100 +Amonospace;FF21 +Aogonek;0104 +Aring;00C5 +Aringacute;01FA +Aringbelow;1E00 +Aringsmall;F7E5 +Asmall;F761 +Atilde;00C3 +Atildesmall;F7E3 +Aybarmenian;0531 +B;0042 +Bcircle;24B7 +Bdotaccent;1E02 +Bdotbelow;1E04 +Becyrillic;0411 +Benarmenian;0532 +Beta;0392 +Bhook;0181 +Blinebelow;1E06 +Bmonospace;FF22 +Brevesmall;F6F4 +Bsmall;F762 +Btopbar;0182 +C;0043 +Caarmenian;053E +Cacute;0106 +Caron;F6CA +Caronsmall;F6F5 +Ccaron;010C +Ccedilla;00C7 +Ccedillaacute;1E08 +Ccedillasmall;F7E7 +Ccircle;24B8 +Ccircumflex;0108 +Cdot;010A +Cdotaccent;010A +Cedillasmall;F7B8 +Chaarmenian;0549 +Cheabkhasiancyrillic;04BC +Checyrillic;0427 +Chedescenderabkhasiancyrillic;04BE +Chedescendercyrillic;04B6 +Chedieresiscyrillic;04F4 +Cheharmenian;0543 +Chekhakassiancyrillic;04CB +Cheverticalstrokecyrillic;04B8 +Chi;03A7 +Chook;0187 +Circumflexsmall;F6F6 +Cmonospace;FF23 +Coarmenian;0551 +Csmall;F763 +D;0044 +DZ;01F1 +DZcaron;01C4 +Daarmenian;0534 +Dafrican;0189 +Dcaron;010E +Dcedilla;1E10 +Dcircle;24B9 +Dcircumflexbelow;1E12 +Dcroat;0110 +Ddotaccent;1E0A +Ddotbelow;1E0C +Decyrillic;0414 +Deicoptic;03EE +Delta;2206 +Deltagreek;0394 +Dhook;018A +Dieresis;F6CB +DieresisAcute;F6CC +DieresisGrave;F6CD +Dieresissmall;F7A8 +Digammagreek;03DC +Djecyrillic;0402 +Dlinebelow;1E0E +Dmonospace;FF24 +Dotaccentsmall;F6F7 +Dslash;0110 +Dsmall;F764 +Dtopbar;018B +Dz;01F2 +Dzcaron;01C5 +Dzeabkhasiancyrillic;04E0 +Dzecyrillic;0405 +Dzhecyrillic;040F +E;0045 +Eacute;00C9 +Eacutesmall;F7E9 +Ebreve;0114 +Ecaron;011A +Ecedillabreve;1E1C +Echarmenian;0535 +Ecircle;24BA +Ecircumflex;00CA +Ecircumflexacute;1EBE +Ecircumflexbelow;1E18 +Ecircumflexdotbelow;1EC6 +Ecircumflexgrave;1EC0 +Ecircumflexhookabove;1EC2 +Ecircumflexsmall;F7EA +Ecircumflextilde;1EC4 +Ecyrillic;0404 +Edblgrave;0204 +Edieresis;00CB +Edieresissmall;F7EB +Edot;0116 +Edotaccent;0116 +Edotbelow;1EB8 +Efcyrillic;0424 +Egrave;00C8 +Egravesmall;F7E8 +Eharmenian;0537 +Ehookabove;1EBA +Eightroman;2167 +Einvertedbreve;0206 +Eiotifiedcyrillic;0464 +Elcyrillic;041B +Elevenroman;216A +Emacron;0112 +Emacronacute;1E16 +Emacrongrave;1E14 +Emcyrillic;041C +Emonospace;FF25 +Encyrillic;041D +Endescendercyrillic;04A2 +Eng;014A +Enghecyrillic;04A4 +Enhookcyrillic;04C7 +Eogonek;0118 +Eopen;0190 +Epsilon;0395 +Epsilontonos;0388 +Ercyrillic;0420 +Ereversed;018E +Ereversedcyrillic;042D +Escyrillic;0421 +Esdescendercyrillic;04AA +Esh;01A9 +Esmall;F765 +Eta;0397 +Etarmenian;0538 +Etatonos;0389 +Eth;00D0 +Ethsmall;F7F0 +Etilde;1EBC 
+Etildebelow;1E1A +Euro;20AC +Ezh;01B7 +Ezhcaron;01EE +Ezhreversed;01B8 +F;0046 +Fcircle;24BB +Fdotaccent;1E1E +Feharmenian;0556 +Feicoptic;03E4 +Fhook;0191 +Fitacyrillic;0472 +Fiveroman;2164 +Fmonospace;FF26 +Fourroman;2163 +Fsmall;F766 +G;0047 +GBsquare;3387 +Gacute;01F4 +Gamma;0393 +Gammaafrican;0194 +Gangiacoptic;03EA +Gbreve;011E +Gcaron;01E6 +Gcedilla;0122 +Gcircle;24BC +Gcircumflex;011C +Gcommaaccent;0122 +Gdot;0120 +Gdotaccent;0120 +Gecyrillic;0413 +Ghadarmenian;0542 +Ghemiddlehookcyrillic;0494 +Ghestrokecyrillic;0492 +Gheupturncyrillic;0490 +Ghook;0193 +Gimarmenian;0533 +Gjecyrillic;0403 +Gmacron;1E20 +Gmonospace;FF27 +Grave;F6CE +Gravesmall;F760 +Gsmall;F767 +Gsmallhook;029B +Gstroke;01E4 +H;0048 +H18533;25CF +H18543;25AA +H18551;25AB +H22073;25A1 +HPsquare;33CB +Haabkhasiancyrillic;04A8 +Hadescendercyrillic;04B2 +Hardsigncyrillic;042A +Hbar;0126 +Hbrevebelow;1E2A +Hcedilla;1E28 +Hcircle;24BD +Hcircumflex;0124 +Hdieresis;1E26 +Hdotaccent;1E22 +Hdotbelow;1E24 +Hmonospace;FF28 +Hoarmenian;0540 +Horicoptic;03E8 +Hsmall;F768 +Hungarumlaut;F6CF +Hungarumlautsmall;F6F8 +Hzsquare;3390 +I;0049 +IAcyrillic;042F +IJ;0132 +IUcyrillic;042E +Iacute;00CD +Iacutesmall;F7ED +Ibreve;012C +Icaron;01CF +Icircle;24BE +Icircumflex;00CE +Icircumflexsmall;F7EE +Icyrillic;0406 +Idblgrave;0208 +Idieresis;00CF +Idieresisacute;1E2E +Idieresiscyrillic;04E4 +Idieresissmall;F7EF +Idot;0130 +Idotaccent;0130 +Idotbelow;1ECA +Iebrevecyrillic;04D6 +Iecyrillic;0415 +Ifraktur;2111 +Igrave;00CC +Igravesmall;F7EC +Ihookabove;1EC8 +Iicyrillic;0418 +Iinvertedbreve;020A +Iishortcyrillic;0419 +Imacron;012A +Imacroncyrillic;04E2 +Imonospace;FF29 +Iniarmenian;053B +Iocyrillic;0401 +Iogonek;012E +Iota;0399 +Iotaafrican;0196 +Iotadieresis;03AA +Iotatonos;038A +Ismall;F769 +Istroke;0197 +Itilde;0128 +Itildebelow;1E2C +Izhitsacyrillic;0474 +Izhitsadblgravecyrillic;0476 +J;004A +Jaarmenian;0541 +Jcircle;24BF +Jcircumflex;0134 +Jecyrillic;0408 +Jheharmenian;054B +Jmonospace;FF2A +Jsmall;F76A +K;004B +KBsquare;3385 +KKsquare;33CD +Kabashkircyrillic;04A0 +Kacute;1E30 +Kacyrillic;041A +Kadescendercyrillic;049A +Kahookcyrillic;04C3 +Kappa;039A +Kastrokecyrillic;049E +Kaverticalstrokecyrillic;049C +Kcaron;01E8 +Kcedilla;0136 +Kcircle;24C0 +Kcommaaccent;0136 +Kdotbelow;1E32 +Keharmenian;0554 +Kenarmenian;053F +Khacyrillic;0425 +Kheicoptic;03E6 +Khook;0198 +Kjecyrillic;040C +Klinebelow;1E34 +Kmonospace;FF2B +Koppacyrillic;0480 +Koppagreek;03DE +Ksicyrillic;046E +Ksmall;F76B +L;004C +LJ;01C7 +LL;F6BF +Lacute;0139 +Lambda;039B +Lcaron;013D +Lcedilla;013B +Lcircle;24C1 +Lcircumflexbelow;1E3C +Lcommaaccent;013B +Ldot;013F +Ldotaccent;013F +Ldotbelow;1E36 +Ldotbelowmacron;1E38 +Liwnarmenian;053C +Lj;01C8 +Ljecyrillic;0409 +Llinebelow;1E3A +Lmonospace;FF2C +Lslash;0141 +Lslashsmall;F6F9 +Lsmall;F76C +M;004D +MBsquare;3386 +Macron;F6D0 +Macronsmall;F7AF +Macute;1E3E +Mcircle;24C2 +Mdotaccent;1E40 +Mdotbelow;1E42 +Menarmenian;0544 +Mmonospace;FF2D +Msmall;F76D +Mturned;019C +Mu;039C +N;004E +NJ;01CA +Nacute;0143 +Ncaron;0147 +Ncedilla;0145 +Ncircle;24C3 +Ncircumflexbelow;1E4A +Ncommaaccent;0145 +Ndotaccent;1E44 +Ndotbelow;1E46 +Nhookleft;019D +Nineroman;2168 +Nj;01CB +Njecyrillic;040A +Nlinebelow;1E48 +Nmonospace;FF2E +Nowarmenian;0546 +Nsmall;F76E +Ntilde;00D1 +Ntildesmall;F7F1 +Nu;039D +O;004F +OE;0152 +OEsmall;F6FA +Oacute;00D3 +Oacutesmall;F7F3 +Obarredcyrillic;04E8 +Obarreddieresiscyrillic;04EA +Obreve;014E +Ocaron;01D1 +Ocenteredtilde;019F +Ocircle;24C4 +Ocircumflex;00D4 +Ocircumflexacute;1ED0 +Ocircumflexdotbelow;1ED8 
+Ocircumflexgrave;1ED2 +Ocircumflexhookabove;1ED4 +Ocircumflexsmall;F7F4 +Ocircumflextilde;1ED6 +Ocyrillic;041E +Odblacute;0150 +Odblgrave;020C +Odieresis;00D6 +Odieresiscyrillic;04E6 +Odieresissmall;F7F6 +Odotbelow;1ECC +Ogoneksmall;F6FB +Ograve;00D2 +Ogravesmall;F7F2 +Oharmenian;0555 +Ohm;2126 +Ohookabove;1ECE +Ohorn;01A0 +Ohornacute;1EDA +Ohorndotbelow;1EE2 +Ohorngrave;1EDC +Ohornhookabove;1EDE +Ohorntilde;1EE0 +Ohungarumlaut;0150 +Oi;01A2 +Oinvertedbreve;020E +Omacron;014C +Omacronacute;1E52 +Omacrongrave;1E50 +Omega;2126 +Omegacyrillic;0460 +Omegagreek;03A9 +Omegaroundcyrillic;047A +Omegatitlocyrillic;047C +Omegatonos;038F +Omicron;039F +Omicrontonos;038C +Omonospace;FF2F +Oneroman;2160 +Oogonek;01EA +Oogonekmacron;01EC +Oopen;0186 +Oslash;00D8 +Oslashacute;01FE +Oslashsmall;F7F8 +Osmall;F76F +Ostrokeacute;01FE +Otcyrillic;047E +Otilde;00D5 +Otildeacute;1E4C +Otildedieresis;1E4E +Otildesmall;F7F5 +P;0050 +Pacute;1E54 +Pcircle;24C5 +Pdotaccent;1E56 +Pecyrillic;041F +Peharmenian;054A +Pemiddlehookcyrillic;04A6 +Phi;03A6 +Phook;01A4 +Pi;03A0 +Piwrarmenian;0553 +Pmonospace;FF30 +Psi;03A8 +Psicyrillic;0470 +Psmall;F770 +Q;0051 +Qcircle;24C6 +Qmonospace;FF31 +Qsmall;F771 +R;0052 +Raarmenian;054C +Racute;0154 +Rcaron;0158 +Rcedilla;0156 +Rcircle;24C7 +Rcommaaccent;0156 +Rdblgrave;0210 +Rdotaccent;1E58 +Rdotbelow;1E5A +Rdotbelowmacron;1E5C +Reharmenian;0550 +Rfraktur;211C +Rho;03A1 +Ringsmall;F6FC +Rinvertedbreve;0212 +Rlinebelow;1E5E +Rmonospace;FF32 +Rsmall;F772 +Rsmallinverted;0281 +Rsmallinvertedsuperior;02B6 +S;0053 +SF010000;250C +SF020000;2514 +SF030000;2510 +SF040000;2518 +SF050000;253C +SF060000;252C +SF070000;2534 +SF080000;251C +SF090000;2524 +SF100000;2500 +SF110000;2502 +SF190000;2561 +SF200000;2562 +SF210000;2556 +SF220000;2555 +SF230000;2563 +SF240000;2551 +SF250000;2557 +SF260000;255D +SF270000;255C +SF280000;255B +SF360000;255E +SF370000;255F +SF380000;255A +SF390000;2554 +SF400000;2569 +SF410000;2566 +SF420000;2560 +SF430000;2550 +SF440000;256C +SF450000;2567 +SF460000;2568 +SF470000;2564 +SF480000;2565 +SF490000;2559 +SF500000;2558 +SF510000;2552 +SF520000;2553 +SF530000;256B +SF540000;256A +Sacute;015A +Sacutedotaccent;1E64 +Sampigreek;03E0 +Scaron;0160 +Scarondotaccent;1E66 +Scaronsmall;F6FD +Scedilla;015E +Schwa;018F +Schwacyrillic;04D8 +Schwadieresiscyrillic;04DA +Scircle;24C8 +Scircumflex;015C +Scommaaccent;0218 +Sdotaccent;1E60 +Sdotbelow;1E62 +Sdotbelowdotaccent;1E68 +Seharmenian;054D +Sevenroman;2166 +Shaarmenian;0547 +Shacyrillic;0428 +Shchacyrillic;0429 +Sheicoptic;03E2 +Shhacyrillic;04BA +Shimacoptic;03EC +Sigma;03A3 +Sixroman;2165 +Smonospace;FF33 +Softsigncyrillic;042C +Ssmall;F773 +Stigmagreek;03DA +T;0054 +Tau;03A4 +Tbar;0166 +Tcaron;0164 +Tcedilla;0162 +Tcircle;24C9 +Tcircumflexbelow;1E70 +Tcommaaccent;0162 +Tdotaccent;1E6A +Tdotbelow;1E6C +Tecyrillic;0422 +Tedescendercyrillic;04AC +Tenroman;2169 +Tetsecyrillic;04B4 +Theta;0398 +Thook;01AC +Thorn;00DE +Thornsmall;F7FE +Threeroman;2162 +Tildesmall;F6FE +Tiwnarmenian;054F +Tlinebelow;1E6E +Tmonospace;FF34 +Toarmenian;0539 +Tonefive;01BC +Tonesix;0184 +Tonetwo;01A7 +Tretroflexhook;01AE +Tsecyrillic;0426 +Tshecyrillic;040B +Tsmall;F774 +Twelveroman;216B +Tworoman;2161 +U;0055 +Uacute;00DA +Uacutesmall;F7FA +Ubreve;016C +Ucaron;01D3 +Ucircle;24CA +Ucircumflex;00DB +Ucircumflexbelow;1E76 +Ucircumflexsmall;F7FB +Ucyrillic;0423 +Udblacute;0170 +Udblgrave;0214 +Udieresis;00DC +Udieresisacute;01D7 +Udieresisbelow;1E72 +Udieresiscaron;01D9 +Udieresiscyrillic;04F0 +Udieresisgrave;01DB +Udieresismacron;01D5 
+Udieresissmall;F7FC +Udotbelow;1EE4 +Ugrave;00D9 +Ugravesmall;F7F9 +Uhookabove;1EE6 +Uhorn;01AF +Uhornacute;1EE8 +Uhorndotbelow;1EF0 +Uhorngrave;1EEA +Uhornhookabove;1EEC +Uhorntilde;1EEE +Uhungarumlaut;0170 +Uhungarumlautcyrillic;04F2 +Uinvertedbreve;0216 +Ukcyrillic;0478 +Umacron;016A +Umacroncyrillic;04EE +Umacrondieresis;1E7A +Umonospace;FF35 +Uogonek;0172 +Upsilon;03A5 +Upsilon1;03D2 +Upsilonacutehooksymbolgreek;03D3 +Upsilonafrican;01B1 +Upsilondieresis;03AB +Upsilondieresishooksymbolgreek;03D4 +Upsilonhooksymbol;03D2 +Upsilontonos;038E +Uring;016E +Ushortcyrillic;040E +Usmall;F775 +Ustraightcyrillic;04AE +Ustraightstrokecyrillic;04B0 +Utilde;0168 +Utildeacute;1E78 +Utildebelow;1E74 +V;0056 +Vcircle;24CB +Vdotbelow;1E7E +Vecyrillic;0412 +Vewarmenian;054E +Vhook;01B2 +Vmonospace;FF36 +Voarmenian;0548 +Vsmall;F776 +Vtilde;1E7C +W;0057 +Wacute;1E82 +Wcircle;24CC +Wcircumflex;0174 +Wdieresis;1E84 +Wdotaccent;1E86 +Wdotbelow;1E88 +Wgrave;1E80 +Wmonospace;FF37 +Wsmall;F777 +X;0058 +Xcircle;24CD +Xdieresis;1E8C +Xdotaccent;1E8A +Xeharmenian;053D +Xi;039E +Xmonospace;FF38 +Xsmall;F778 +Y;0059 +Yacute;00DD +Yacutesmall;F7FD +Yatcyrillic;0462 +Ycircle;24CE +Ycircumflex;0176 +Ydieresis;0178 +Ydieresissmall;F7FF +Ydotaccent;1E8E +Ydotbelow;1EF4 +Yericyrillic;042B +Yerudieresiscyrillic;04F8 +Ygrave;1EF2 +Yhook;01B3 +Yhookabove;1EF6 +Yiarmenian;0545 +Yicyrillic;0407 +Yiwnarmenian;0552 +Ymonospace;FF39 +Ysmall;F779 +Ytilde;1EF8 +Yusbigcyrillic;046A +Yusbigiotifiedcyrillic;046C +Yuslittlecyrillic;0466 +Yuslittleiotifiedcyrillic;0468 +Z;005A +Zaarmenian;0536 +Zacute;0179 +Zcaron;017D +Zcaronsmall;F6FF +Zcircle;24CF +Zcircumflex;1E90 +Zdot;017B +Zdotaccent;017B +Zdotbelow;1E92 +Zecyrillic;0417 +Zedescendercyrillic;0498 +Zedieresiscyrillic;04DE +Zeta;0396 +Zhearmenian;053A +Zhebrevecyrillic;04C1 +Zhecyrillic;0416 +Zhedescendercyrillic;0496 +Zhedieresiscyrillic;04DC +Zlinebelow;1E94 +Zmonospace;FF3A +Zsmall;F77A +Zstroke;01B5 +a;0061 +aabengali;0986 +aacute;00E1 +aadeva;0906 +aagujarati;0A86 +aagurmukhi;0A06 +aamatragurmukhi;0A3E +aarusquare;3303 +aavowelsignbengali;09BE +aavowelsigndeva;093E +aavowelsigngujarati;0ABE +abbreviationmarkarmenian;055F +abbreviationsigndeva;0970 +abengali;0985 +abopomofo;311A +abreve;0103 +abreveacute;1EAF +abrevecyrillic;04D1 +abrevedotbelow;1EB7 +abrevegrave;1EB1 +abrevehookabove;1EB3 +abrevetilde;1EB5 +acaron;01CE +acircle;24D0 +acircumflex;00E2 +acircumflexacute;1EA5 +acircumflexdotbelow;1EAD +acircumflexgrave;1EA7 +acircumflexhookabove;1EA9 +acircumflextilde;1EAB +acute;00B4 +acutebelowcmb;0317 +acutecmb;0301 +acutecomb;0301 +acutedeva;0954 +acutelowmod;02CF +acutetonecmb;0341 +acyrillic;0430 +adblgrave;0201 +addakgurmukhi;0A71 +adeva;0905 +adieresis;00E4 +adieresiscyrillic;04D3 +adieresismacron;01DF +adotbelow;1EA1 +adotmacron;01E1 +ae;00E6 +aeacute;01FD +aekorean;3150 +aemacron;01E3 +afii00208;2015 +afii08941;20A4 +afii10017;0410 +afii10018;0411 +afii10019;0412 +afii10020;0413 +afii10021;0414 +afii10022;0415 +afii10023;0401 +afii10024;0416 +afii10025;0417 +afii10026;0418 +afii10027;0419 +afii10028;041A +afii10029;041B +afii10030;041C +afii10031;041D +afii10032;041E +afii10033;041F +afii10034;0420 +afii10035;0421 +afii10036;0422 +afii10037;0423 +afii10038;0424 +afii10039;0425 +afii10040;0426 +afii10041;0427 +afii10042;0428 +afii10043;0429 +afii10044;042A +afii10045;042B +afii10046;042C +afii10047;042D +afii10048;042E +afii10049;042F +afii10050;0490 +afii10051;0402 +afii10052;0403 +afii10053;0404 +afii10054;0405 +afii10055;0406 +afii10056;0407 +afii10057;0408 
+afii10058;0409 +afii10059;040A +afii10060;040B +afii10061;040C +afii10062;040E +afii10063;F6C4 +afii10064;F6C5 +afii10065;0430 +afii10066;0431 +afii10067;0432 +afii10068;0433 +afii10069;0434 +afii10070;0435 +afii10071;0451 +afii10072;0436 +afii10073;0437 +afii10074;0438 +afii10075;0439 +afii10076;043A +afii10077;043B +afii10078;043C +afii10079;043D +afii10080;043E +afii10081;043F +afii10082;0440 +afii10083;0441 +afii10084;0442 +afii10085;0443 +afii10086;0444 +afii10087;0445 +afii10088;0446 +afii10089;0447 +afii10090;0448 +afii10091;0449 +afii10092;044A +afii10093;044B +afii10094;044C +afii10095;044D +afii10096;044E +afii10097;044F +afii10098;0491 +afii10099;0452 +afii10100;0453 +afii10101;0454 +afii10102;0455 +afii10103;0456 +afii10104;0457 +afii10105;0458 +afii10106;0459 +afii10107;045A +afii10108;045B +afii10109;045C +afii10110;045E +afii10145;040F +afii10146;0462 +afii10147;0472 +afii10148;0474 +afii10192;F6C6 +afii10193;045F +afii10194;0463 +afii10195;0473 +afii10196;0475 +afii10831;F6C7 +afii10832;F6C8 +afii10846;04D9 +afii299;200E +afii300;200F +afii301;200D +afii57381;066A +afii57388;060C +afii57392;0660 +afii57393;0661 +afii57394;0662 +afii57395;0663 +afii57396;0664 +afii57397;0665 +afii57398;0666 +afii57399;0667 +afii57400;0668 +afii57401;0669 +afii57403;061B +afii57407;061F +afii57409;0621 +afii57410;0622 +afii57411;0623 +afii57412;0624 +afii57413;0625 +afii57414;0626 +afii57415;0627 +afii57416;0628 +afii57417;0629 +afii57418;062A +afii57419;062B +afii57420;062C +afii57421;062D +afii57422;062E +afii57423;062F +afii57424;0630 +afii57425;0631 +afii57426;0632 +afii57427;0633 +afii57428;0634 +afii57429;0635 +afii57430;0636 +afii57431;0637 +afii57432;0638 +afii57433;0639 +afii57434;063A +afii57440;0640 +afii57441;0641 +afii57442;0642 +afii57443;0643 +afii57444;0644 +afii57445;0645 +afii57446;0646 +afii57448;0648 +afii57449;0649 +afii57450;064A +afii57451;064B +afii57452;064C +afii57453;064D +afii57454;064E +afii57455;064F +afii57456;0650 +afii57457;0651 +afii57458;0652 +afii57470;0647 +afii57505;06A4 +afii57506;067E +afii57507;0686 +afii57508;0698 +afii57509;06AF +afii57511;0679 +afii57512;0688 +afii57513;0691 +afii57514;06BA +afii57519;06D2 +afii57534;06D5 +afii57636;20AA +afii57645;05BE +afii57658;05C3 +afii57664;05D0 +afii57665;05D1 +afii57666;05D2 +afii57667;05D3 +afii57668;05D4 +afii57669;05D5 +afii57670;05D6 +afii57671;05D7 +afii57672;05D8 +afii57673;05D9 +afii57674;05DA +afii57675;05DB +afii57676;05DC +afii57677;05DD +afii57678;05DE +afii57679;05DF +afii57680;05E0 +afii57681;05E1 +afii57682;05E2 +afii57683;05E3 +afii57684;05E4 +afii57685;05E5 +afii57686;05E6 +afii57687;05E7 +afii57688;05E8 +afii57689;05E9 +afii57690;05EA +afii57694;FB2A +afii57695;FB2B +afii57700;FB4B +afii57705;FB1F +afii57716;05F0 +afii57717;05F1 +afii57718;05F2 +afii57723;FB35 +afii57793;05B4 +afii57794;05B5 +afii57795;05B6 +afii57796;05BB +afii57797;05B8 +afii57798;05B7 +afii57799;05B0 +afii57800;05B2 +afii57801;05B1 +afii57802;05B3 +afii57803;05C2 +afii57804;05C1 +afii57806;05B9 +afii57807;05BC +afii57839;05BD +afii57841;05BF +afii57842;05C0 +afii57929;02BC +afii61248;2105 +afii61289;2113 +afii61352;2116 +afii61573;202C +afii61574;202D +afii61575;202E +afii61664;200C +afii63167;066D +afii64937;02BD +agrave;00E0 +agujarati;0A85 +agurmukhi;0A05 +ahiragana;3042 +ahookabove;1EA3 +aibengali;0990 +aibopomofo;311E +aideva;0910 +aiecyrillic;04D5 +aigujarati;0A90 +aigurmukhi;0A10 +aimatragurmukhi;0A48 +ainarabic;0639 +ainfinalarabic;FECA +aininitialarabic;FECB +ainmedialarabic;FECC +ainvertedbreve;0203 
+aivowelsignbengali;09C8 +aivowelsigndeva;0948 +aivowelsigngujarati;0AC8 +akatakana;30A2 +akatakanahalfwidth;FF71 +akorean;314F +alef;05D0 +alefarabic;0627 +alefdageshhebrew;FB30 +aleffinalarabic;FE8E +alefhamzaabovearabic;0623 +alefhamzaabovefinalarabic;FE84 +alefhamzabelowarabic;0625 +alefhamzabelowfinalarabic;FE88 +alefhebrew;05D0 +aleflamedhebrew;FB4F +alefmaddaabovearabic;0622 +alefmaddaabovefinalarabic;FE82 +alefmaksuraarabic;0649 +alefmaksurafinalarabic;FEF0 +alefmaksurainitialarabic;FEF3 +alefmaksuramedialarabic;FEF4 +alefpatahhebrew;FB2E +alefqamatshebrew;FB2F +aleph;2135 +allequal;224C +alpha;03B1 +alphatonos;03AC +amacron;0101 +amonospace;FF41 +ampersand;0026 +ampersandmonospace;FF06 +ampersandsmall;F726 +amsquare;33C2 +anbopomofo;3122 +angbopomofo;3124 +angkhankhuthai;0E5A +angle;2220 +anglebracketleft;3008 +anglebracketleftvertical;FE3F +anglebracketright;3009 +anglebracketrightvertical;FE40 +angleleft;2329 +angleright;232A +angstrom;212B +anoteleia;0387 +anudattadeva;0952 +anusvarabengali;0982 +anusvaradeva;0902 +anusvaragujarati;0A82 +aogonek;0105 +apaatosquare;3300 +aparen;249C +apostrophearmenian;055A +apostrophemod;02BC +apple;F8FF +approaches;2250 +approxequal;2248 +approxequalorimage;2252 +approximatelyequal;2245 +araeaekorean;318E +araeakorean;318D +arc;2312 +arighthalfring;1E9A +aring;00E5 +aringacute;01FB +aringbelow;1E01 +arrowboth;2194 +arrowdashdown;21E3 +arrowdashleft;21E0 +arrowdashright;21E2 +arrowdashup;21E1 +arrowdblboth;21D4 +arrowdbldown;21D3 +arrowdblleft;21D0 +arrowdblright;21D2 +arrowdblup;21D1 +arrowdown;2193 +arrowdownleft;2199 +arrowdownright;2198 +arrowdownwhite;21E9 +arrowheaddownmod;02C5 +arrowheadleftmod;02C2 +arrowheadrightmod;02C3 +arrowheadupmod;02C4 +arrowhorizex;F8E7 +arrowleft;2190 +arrowleftdbl;21D0 +arrowleftdblstroke;21CD +arrowleftoverright;21C6 +arrowleftwhite;21E6 +arrowright;2192 +arrowrightdblstroke;21CF +arrowrightheavy;279E +arrowrightoverleft;21C4 +arrowrightwhite;21E8 +arrowtableft;21E4 +arrowtabright;21E5 +arrowup;2191 +arrowupdn;2195 +arrowupdnbse;21A8 +arrowupdownbase;21A8 +arrowupleft;2196 +arrowupleftofdown;21C5 +arrowupright;2197 +arrowupwhite;21E7 +arrowvertex;F8E6 +asciicircum;005E +asciicircummonospace;FF3E +asciitilde;007E +asciitildemonospace;FF5E +ascript;0251 +ascriptturned;0252 +asmallhiragana;3041 +asmallkatakana;30A1 +asmallkatakanahalfwidth;FF67 +asterisk;002A +asteriskaltonearabic;066D +asteriskarabic;066D +asteriskmath;2217 +asteriskmonospace;FF0A +asterisksmall;FE61 +asterism;2042 +asuperior;F6E9 +asymptoticallyequal;2243 +at;0040 +atilde;00E3 +atmonospace;FF20 +atsmall;FE6B +aturned;0250 +aubengali;0994 +aubopomofo;3120 +audeva;0914 +augujarati;0A94 +augurmukhi;0A14 +aulengthmarkbengali;09D7 +aumatragurmukhi;0A4C +auvowelsignbengali;09CC +auvowelsigndeva;094C +auvowelsigngujarati;0ACC +avagrahadeva;093D +aybarmenian;0561 +ayin;05E2 +ayinaltonehebrew;FB20 +ayinhebrew;05E2 +b;0062 +babengali;09AC +backslash;005C +backslashmonospace;FF3C +badeva;092C +bagujarati;0AAC +bagurmukhi;0A2C +bahiragana;3070 +bahtthai;0E3F +bakatakana;30D0 +bar;007C +barmonospace;FF5C +bbopomofo;3105 +bcircle;24D1 +bdotaccent;1E03 +bdotbelow;1E05 +beamedsixteenthnotes;266C +because;2235 +becyrillic;0431 +beharabic;0628 +behfinalarabic;FE90 +behinitialarabic;FE91 +behiragana;3079 +behmedialarabic;FE92 +behmeeminitialarabic;FC9F +behmeemisolatedarabic;FC08 +behnoonfinalarabic;FC6D +bekatakana;30D9 +benarmenian;0562 +bet;05D1 +beta;03B2 +betasymbolgreek;03D0 +betdagesh;FB31 +betdageshhebrew;FB31 +bethebrew;05D1 +betrafehebrew;FB4C 
+bhabengali;09AD +bhadeva;092D +bhagujarati;0AAD +bhagurmukhi;0A2D +bhook;0253 +bihiragana;3073 +bikatakana;30D3 +bilabialclick;0298 +bindigurmukhi;0A02 +birusquare;3331 +blackcircle;25CF +blackdiamond;25C6 +blackdownpointingtriangle;25BC +blackleftpointingpointer;25C4 +blackleftpointingtriangle;25C0 +blacklenticularbracketleft;3010 +blacklenticularbracketleftvertical;FE3B +blacklenticularbracketright;3011 +blacklenticularbracketrightvertical;FE3C +blacklowerlefttriangle;25E3 +blacklowerrighttriangle;25E2 +blackrectangle;25AC +blackrightpointingpointer;25BA +blackrightpointingtriangle;25B6 +blacksmallsquare;25AA +blacksmilingface;263B +blacksquare;25A0 +blackstar;2605 +blackupperlefttriangle;25E4 +blackupperrighttriangle;25E5 +blackuppointingsmalltriangle;25B4 +blackuppointingtriangle;25B2 +blank;2423 +blinebelow;1E07 +block;2588 +bmonospace;FF42 +bobaimaithai;0E1A +bohiragana;307C +bokatakana;30DC +bparen;249D +bqsquare;33C3 +braceex;F8F4 +braceleft;007B +braceleftbt;F8F3 +braceleftmid;F8F2 +braceleftmonospace;FF5B +braceleftsmall;FE5B +bracelefttp;F8F1 +braceleftvertical;FE37 +braceright;007D +bracerightbt;F8FE +bracerightmid;F8FD +bracerightmonospace;FF5D +bracerightsmall;FE5C +bracerighttp;F8FC +bracerightvertical;FE38 +bracketleft;005B +bracketleftbt;F8F0 +bracketleftex;F8EF +bracketleftmonospace;FF3B +bracketlefttp;F8EE +bracketright;005D +bracketrightbt;F8FB +bracketrightex;F8FA +bracketrightmonospace;FF3D +bracketrighttp;F8F9 +breve;02D8 +brevebelowcmb;032E +brevecmb;0306 +breveinvertedbelowcmb;032F +breveinvertedcmb;0311 +breveinverteddoublecmb;0361 +bridgebelowcmb;032A +bridgeinvertedbelowcmb;033A +brokenbar;00A6 +bstroke;0180 +bsuperior;F6EA +btopbar;0183 +buhiragana;3076 +bukatakana;30D6 +bullet;2022 +bulletinverse;25D8 +bulletoperator;2219 +bullseye;25CE +c;0063 +caarmenian;056E +cabengali;099A +cacute;0107 +cadeva;091A +cagujarati;0A9A +cagurmukhi;0A1A +calsquare;3388 +candrabindubengali;0981 +candrabinducmb;0310 +candrabindudeva;0901 +candrabindugujarati;0A81 +capslock;21EA +careof;2105 +caron;02C7 +caronbelowcmb;032C +caroncmb;030C +carriagereturn;21B5 +cbopomofo;3118 +ccaron;010D +ccedilla;00E7 +ccedillaacute;1E09 +ccircle;24D2 +ccircumflex;0109 +ccurl;0255 +cdot;010B +cdotaccent;010B +cdsquare;33C5 +cedilla;00B8 +cedillacmb;0327 +cent;00A2 +centigrade;2103 +centinferior;F6DF +centmonospace;FFE0 +centoldstyle;F7A2 +centsuperior;F6E0 +chaarmenian;0579 +chabengali;099B +chadeva;091B +chagujarati;0A9B +chagurmukhi;0A1B +chbopomofo;3114 +cheabkhasiancyrillic;04BD +checkmark;2713 +checyrillic;0447 +chedescenderabkhasiancyrillic;04BF +chedescendercyrillic;04B7 +chedieresiscyrillic;04F5 +cheharmenian;0573 +chekhakassiancyrillic;04CC +cheverticalstrokecyrillic;04B9 +chi;03C7 +chieuchacirclekorean;3277 +chieuchaparenkorean;3217 +chieuchcirclekorean;3269 +chieuchkorean;314A +chieuchparenkorean;3209 +chochangthai;0E0A +chochanthai;0E08 +chochingthai;0E09 +chochoethai;0E0C +chook;0188 +cieucacirclekorean;3276 +cieucaparenkorean;3216 +cieuccirclekorean;3268 +cieuckorean;3148 +cieucparenkorean;3208 +cieucuparenkorean;321C +circle;25CB +circlemultiply;2297 +circleot;2299 +circleplus;2295 +circlepostalmark;3036 +circlewithlefthalfblack;25D0 +circlewithrighthalfblack;25D1 +circumflex;02C6 +circumflexbelowcmb;032D +circumflexcmb;0302 +clear;2327 +clickalveolar;01C2 +clickdental;01C0 +clicklateral;01C1 +clickretroflex;01C3 +club;2663 +clubsuitblack;2663 +clubsuitwhite;2667 +cmcubedsquare;33A4 +cmonospace;FF43 +cmsquaredsquare;33A0 +coarmenian;0581 +colon;003A +colonmonetary;20A1 
+colonmonospace;FF1A +colonsign;20A1 +colonsmall;FE55 +colontriangularhalfmod;02D1 +colontriangularmod;02D0 +comma;002C +commaabovecmb;0313 +commaaboverightcmb;0315 +commaaccent;F6C3 +commaarabic;060C +commaarmenian;055D +commainferior;F6E1 +commamonospace;FF0C +commareversedabovecmb;0314 +commareversedmod;02BD +commasmall;FE50 +commasuperior;F6E2 +commaturnedabovecmb;0312 +commaturnedmod;02BB +compass;263C +congruent;2245 +contourintegral;222E +control;2303 +controlACK;0006 +controlBEL;0007 +controlBS;0008 +controlCAN;0018 +controlCR;000D +controlDC1;0011 +controlDC2;0012 +controlDC3;0013 +controlDC4;0014 +controlDEL;007F +controlDLE;0010 +controlEM;0019 +controlENQ;0005 +controlEOT;0004 +controlESC;001B +controlETB;0017 +controlETX;0003 +controlFF;000C +controlFS;001C +controlGS;001D +controlHT;0009 +controlLF;000A +controlNAK;0015 +controlRS;001E +controlSI;000F +controlSO;000E +controlSOT;0002 +controlSTX;0001 +controlSUB;001A +controlSYN;0016 +controlUS;001F +controlVT;000B +copyright;00A9 +copyrightsans;F8E9 +copyrightserif;F6D9 +cornerbracketleft;300C +cornerbracketlefthalfwidth;FF62 +cornerbracketleftvertical;FE41 +cornerbracketright;300D +cornerbracketrighthalfwidth;FF63 +cornerbracketrightvertical;FE42 +corporationsquare;337F +cosquare;33C7 +coverkgsquare;33C6 +cparen;249E +cruzeiro;20A2 +cstretched;0297 +curlyand;22CF +curlyor;22CE +currency;00A4 +cyrBreve;F6D1 +cyrFlex;F6D2 +cyrbreve;F6D4 +cyrflex;F6D5 +d;0064 +daarmenian;0564 +dabengali;09A6 +dadarabic;0636 +dadeva;0926 +dadfinalarabic;FEBE +dadinitialarabic;FEBF +dadmedialarabic;FEC0 +dagesh;05BC +dageshhebrew;05BC +dagger;2020 +daggerdbl;2021 +dagujarati;0AA6 +dagurmukhi;0A26 +dahiragana;3060 +dakatakana;30C0 +dalarabic;062F +dalet;05D3 +daletdagesh;FB33 +daletdageshhebrew;FB33 +dalethatafpatah;05D3 05B2 +dalethatafpatahhebrew;05D3 05B2 +dalethatafsegol;05D3 05B1 +dalethatafsegolhebrew;05D3 05B1 +dalethebrew;05D3 +dalethiriq;05D3 05B4 +dalethiriqhebrew;05D3 05B4 +daletholam;05D3 05B9 +daletholamhebrew;05D3 05B9 +daletpatah;05D3 05B7 +daletpatahhebrew;05D3 05B7 +daletqamats;05D3 05B8 +daletqamatshebrew;05D3 05B8 +daletqubuts;05D3 05BB +daletqubutshebrew;05D3 05BB +daletsegol;05D3 05B6 +daletsegolhebrew;05D3 05B6 +daletsheva;05D3 05B0 +daletshevahebrew;05D3 05B0 +dalettsere;05D3 05B5 +dalettserehebrew;05D3 05B5 +dalfinalarabic;FEAA +dammaarabic;064F +dammalowarabic;064F +dammatanaltonearabic;064C +dammatanarabic;064C +danda;0964 +dargahebrew;05A7 +dargalefthebrew;05A7 +dasiapneumatacyrilliccmb;0485 +dblGrave;F6D3 +dblanglebracketleft;300A +dblanglebracketleftvertical;FE3D +dblanglebracketright;300B +dblanglebracketrightvertical;FE3E +dblarchinvertedbelowcmb;032B +dblarrowleft;21D4 +dblarrowright;21D2 +dbldanda;0965 +dblgrave;F6D6 +dblgravecmb;030F +dblintegral;222C +dbllowline;2017 +dbllowlinecmb;0333 +dbloverlinecmb;033F +dblprimemod;02BA +dblverticalbar;2016 +dblverticallineabovecmb;030E +dbopomofo;3109 +dbsquare;33C8 +dcaron;010F +dcedilla;1E11 +dcircle;24D3 +dcircumflexbelow;1E13 +dcroat;0111 +ddabengali;09A1 +ddadeva;0921 +ddagujarati;0AA1 +ddagurmukhi;0A21 +ddalarabic;0688 +ddalfinalarabic;FB89 +dddhadeva;095C +ddhabengali;09A2 +ddhadeva;0922 +ddhagujarati;0AA2 +ddhagurmukhi;0A22 +ddotaccent;1E0B +ddotbelow;1E0D +decimalseparatorarabic;066B +decimalseparatorpersian;066B +decyrillic;0434 +degree;00B0 +dehihebrew;05AD +dehiragana;3067 +deicoptic;03EF +dekatakana;30C7 +deleteleft;232B +deleteright;2326 +delta;03B4 +deltaturned;018D +denominatorminusonenumeratorbengali;09F8 +dezh;02A4 +dhabengali;09A7 +dhadeva;0927 
+dhagujarati;0AA7 +dhagurmukhi;0A27 +dhook;0257 +dialytikatonos;0385 +dialytikatonoscmb;0344 +diamond;2666 +diamondsuitwhite;2662 +dieresis;00A8 +dieresisacute;F6D7 +dieresisbelowcmb;0324 +dieresiscmb;0308 +dieresisgrave;F6D8 +dieresistonos;0385 +dihiragana;3062 +dikatakana;30C2 +dittomark;3003 +divide;00F7 +divides;2223 +divisionslash;2215 +djecyrillic;0452 +dkshade;2593 +dlinebelow;1E0F +dlsquare;3397 +dmacron;0111 +dmonospace;FF44 +dnblock;2584 +dochadathai;0E0E +dodekthai;0E14 +dohiragana;3069 +dokatakana;30C9 +dollar;0024 +dollarinferior;F6E3 +dollarmonospace;FF04 +dollaroldstyle;F724 +dollarsmall;FE69 +dollarsuperior;F6E4 +dong;20AB +dorusquare;3326 +dotaccent;02D9 +dotaccentcmb;0307 +dotbelowcmb;0323 +dotbelowcomb;0323 +dotkatakana;30FB +dotlessi;0131 +dotlessj;F6BE +dotlessjstrokehook;0284 +dotmath;22C5 +dottedcircle;25CC +doubleyodpatah;FB1F +doubleyodpatahhebrew;FB1F +downtackbelowcmb;031E +downtackmod;02D5 +dparen;249F +dsuperior;F6EB +dtail;0256 +dtopbar;018C +duhiragana;3065 +dukatakana;30C5 +dz;01F3 +dzaltone;02A3 +dzcaron;01C6 +dzcurl;02A5 +dzeabkhasiancyrillic;04E1 +dzecyrillic;0455 +dzhecyrillic;045F +e;0065 +eacute;00E9 +earth;2641 +ebengali;098F +ebopomofo;311C +ebreve;0115 +ecandradeva;090D +ecandragujarati;0A8D +ecandravowelsigndeva;0945 +ecandravowelsigngujarati;0AC5 +ecaron;011B +ecedillabreve;1E1D +echarmenian;0565 +echyiwnarmenian;0587 +ecircle;24D4 +ecircumflex;00EA +ecircumflexacute;1EBF +ecircumflexbelow;1E19 +ecircumflexdotbelow;1EC7 +ecircumflexgrave;1EC1 +ecircumflexhookabove;1EC3 +ecircumflextilde;1EC5 +ecyrillic;0454 +edblgrave;0205 +edeva;090F +edieresis;00EB +edot;0117 +edotaccent;0117 +edotbelow;1EB9 +eegurmukhi;0A0F +eematragurmukhi;0A47 +efcyrillic;0444 +egrave;00E8 +egujarati;0A8F +eharmenian;0567 +ehbopomofo;311D +ehiragana;3048 +ehookabove;1EBB +eibopomofo;311F +eight;0038 +eightarabic;0668 +eightbengali;09EE +eightcircle;2467 +eightcircleinversesansserif;2791 +eightdeva;096E +eighteencircle;2471 +eighteenparen;2485 +eighteenperiod;2499 +eightgujarati;0AEE +eightgurmukhi;0A6E +eighthackarabic;0668 +eighthangzhou;3028 +eighthnotebeamed;266B +eightideographicparen;3227 +eightinferior;2088 +eightmonospace;FF18 +eightoldstyle;F738 +eightparen;247B +eightperiod;248F +eightpersian;06F8 +eightroman;2177 +eightsuperior;2078 +eightthai;0E58 +einvertedbreve;0207 +eiotifiedcyrillic;0465 +ekatakana;30A8 +ekatakanahalfwidth;FF74 +ekonkargurmukhi;0A74 +ekorean;3154 +elcyrillic;043B +element;2208 +elevencircle;246A +elevenparen;247E +elevenperiod;2492 +elevenroman;217A +ellipsis;2026 +ellipsisvertical;22EE +emacron;0113 +emacronacute;1E17 +emacrongrave;1E15 +emcyrillic;043C +emdash;2014 +emdashvertical;FE31 +emonospace;FF45 +emphasismarkarmenian;055B +emptyset;2205 +enbopomofo;3123 +encyrillic;043D +endash;2013 +endashvertical;FE32 +endescendercyrillic;04A3 +eng;014B +engbopomofo;3125 +enghecyrillic;04A5 +enhookcyrillic;04C8 +enspace;2002 +eogonek;0119 +eokorean;3153 +eopen;025B +eopenclosed;029A +eopenreversed;025C +eopenreversedclosed;025E +eopenreversedhook;025D +eparen;24A0 +epsilon;03B5 +epsilontonos;03AD +equal;003D +equalmonospace;FF1D +equalsmall;FE66 +equalsuperior;207C +equivalence;2261 +erbopomofo;3126 +ercyrillic;0440 +ereversed;0258 +ereversedcyrillic;044D +escyrillic;0441 +esdescendercyrillic;04AB +esh;0283 +eshcurl;0286 +eshortdeva;090E +eshortvowelsigndeva;0946 +eshreversedloop;01AA +eshsquatreversed;0285 +esmallhiragana;3047 +esmallkatakana;30A7 +esmallkatakanahalfwidth;FF6A +estimated;212E +esuperior;F6EC +eta;03B7 +etarmenian;0568 +etatonos;03AE 
+eth;00F0 +etilde;1EBD +etildebelow;1E1B +etnahtafoukhhebrew;0591 +etnahtafoukhlefthebrew;0591 +etnahtahebrew;0591 +etnahtalefthebrew;0591 +eturned;01DD +eukorean;3161 +euro;20AC +evowelsignbengali;09C7 +evowelsigndeva;0947 +evowelsigngujarati;0AC7 +exclam;0021 +exclamarmenian;055C +exclamdbl;203C +exclamdown;00A1 +exclamdownsmall;F7A1 +exclammonospace;FF01 +exclamsmall;F721 +existential;2203 +ezh;0292 +ezhcaron;01EF +ezhcurl;0293 +ezhreversed;01B9 +ezhtail;01BA +f;0066 +fadeva;095E +fagurmukhi;0A5E +fahrenheit;2109 +fathaarabic;064E +fathalowarabic;064E +fathatanarabic;064B +fbopomofo;3108 +fcircle;24D5 +fdotaccent;1E1F +feharabic;0641 +feharmenian;0586 +fehfinalarabic;FED2 +fehinitialarabic;FED3 +fehmedialarabic;FED4 +feicoptic;03E5 +female;2640 +ff;FB00 +ffi;FB03 +ffl;FB04 +fi;FB01 +fifteencircle;246E +fifteenparen;2482 +fifteenperiod;2496 +figuredash;2012 +filledbox;25A0 +filledrect;25AC +finalkaf;05DA +finalkafdagesh;FB3A +finalkafdageshhebrew;FB3A +finalkafhebrew;05DA +finalkafqamats;05DA 05B8 +finalkafqamatshebrew;05DA 05B8 +finalkafsheva;05DA 05B0 +finalkafshevahebrew;05DA 05B0 +finalmem;05DD +finalmemhebrew;05DD +finalnun;05DF +finalnunhebrew;05DF +finalpe;05E3 +finalpehebrew;05E3 +finaltsadi;05E5 +finaltsadihebrew;05E5 +firsttonechinese;02C9 +fisheye;25C9 +fitacyrillic;0473 +five;0035 +fivearabic;0665 +fivebengali;09EB +fivecircle;2464 +fivecircleinversesansserif;278E +fivedeva;096B +fiveeighths;215D +fivegujarati;0AEB +fivegurmukhi;0A6B +fivehackarabic;0665 +fivehangzhou;3025 +fiveideographicparen;3224 +fiveinferior;2085 +fivemonospace;FF15 +fiveoldstyle;F735 +fiveparen;2478 +fiveperiod;248C +fivepersian;06F5 +fiveroman;2174 +fivesuperior;2075 +fivethai;0E55 +fl;FB02 +florin;0192 +fmonospace;FF46 +fmsquare;3399 +fofanthai;0E1F +fofathai;0E1D +fongmanthai;0E4F +forall;2200 +four;0034 +fourarabic;0664 +fourbengali;09EA +fourcircle;2463 +fourcircleinversesansserif;278D +fourdeva;096A +fourgujarati;0AEA +fourgurmukhi;0A6A +fourhackarabic;0664 +fourhangzhou;3024 +fourideographicparen;3223 +fourinferior;2084 +fourmonospace;FF14 +fournumeratorbengali;09F7 +fouroldstyle;F734 +fourparen;2477 +fourperiod;248B +fourpersian;06F4 +fourroman;2173 +foursuperior;2074 +fourteencircle;246D +fourteenparen;2481 +fourteenperiod;2495 +fourthai;0E54 +fourthtonechinese;02CB +fparen;24A1 +fraction;2044 +franc;20A3 +g;0067 +gabengali;0997 +gacute;01F5 +gadeva;0917 +gafarabic;06AF +gaffinalarabic;FB93 +gafinitialarabic;FB94 +gafmedialarabic;FB95 +gagujarati;0A97 +gagurmukhi;0A17 +gahiragana;304C +gakatakana;30AC +gamma;03B3 +gammalatinsmall;0263 +gammasuperior;02E0 +gangiacoptic;03EB +gbopomofo;310D +gbreve;011F +gcaron;01E7 +gcedilla;0123 +gcircle;24D6 +gcircumflex;011D +gcommaaccent;0123 +gdot;0121 +gdotaccent;0121 +gecyrillic;0433 +gehiragana;3052 +gekatakana;30B2 +geometricallyequal;2251 +gereshaccenthebrew;059C +gereshhebrew;05F3 +gereshmuqdamhebrew;059D +germandbls;00DF +gershayimaccenthebrew;059E +gershayimhebrew;05F4 +getamark;3013 +ghabengali;0998 +ghadarmenian;0572 +ghadeva;0918 +ghagujarati;0A98 +ghagurmukhi;0A18 +ghainarabic;063A +ghainfinalarabic;FECE +ghaininitialarabic;FECF +ghainmedialarabic;FED0 +ghemiddlehookcyrillic;0495 +ghestrokecyrillic;0493 +gheupturncyrillic;0491 +ghhadeva;095A +ghhagurmukhi;0A5A +ghook;0260 +ghzsquare;3393 +gihiragana;304E +gikatakana;30AE +gimarmenian;0563 +gimel;05D2 +gimeldagesh;FB32 +gimeldageshhebrew;FB32 +gimelhebrew;05D2 +gjecyrillic;0453 +glottalinvertedstroke;01BE +glottalstop;0294 +glottalstopinverted;0296 +glottalstopmod;02C0 +glottalstopreversed;0295 
+glottalstopreversedmod;02C1 +glottalstopreversedsuperior;02E4 +glottalstopstroke;02A1 +glottalstopstrokereversed;02A2 +gmacron;1E21 +gmonospace;FF47 +gohiragana;3054 +gokatakana;30B4 +gparen;24A2 +gpasquare;33AC +gradient;2207 +grave;0060 +gravebelowcmb;0316 +gravecmb;0300 +gravecomb;0300 +gravedeva;0953 +gravelowmod;02CE +gravemonospace;FF40 +gravetonecmb;0340 +greater;003E +greaterequal;2265 +greaterequalorless;22DB +greatermonospace;FF1E +greaterorequivalent;2273 +greaterorless;2277 +greateroverequal;2267 +greatersmall;FE65 +gscript;0261 +gstroke;01E5 +guhiragana;3050 +guillemotleft;00AB +guillemotright;00BB +guilsinglleft;2039 +guilsinglright;203A +gukatakana;30B0 +guramusquare;3318 +gysquare;33C9 +h;0068 +haabkhasiancyrillic;04A9 +haaltonearabic;06C1 +habengali;09B9 +hadescendercyrillic;04B3 +hadeva;0939 +hagujarati;0AB9 +hagurmukhi;0A39 +haharabic;062D +hahfinalarabic;FEA2 +hahinitialarabic;FEA3 +hahiragana;306F +hahmedialarabic;FEA4 +haitusquare;332A +hakatakana;30CF +hakatakanahalfwidth;FF8A +halantgurmukhi;0A4D +hamzaarabic;0621 +hamzadammaarabic;0621 064F +hamzadammatanarabic;0621 064C +hamzafathaarabic;0621 064E +hamzafathatanarabic;0621 064B +hamzalowarabic;0621 +hamzalowkasraarabic;0621 0650 +hamzalowkasratanarabic;0621 064D +hamzasukunarabic;0621 0652 +hangulfiller;3164 +hardsigncyrillic;044A +harpoonleftbarbup;21BC +harpoonrightbarbup;21C0 +hasquare;33CA +hatafpatah;05B2 +hatafpatah16;05B2 +hatafpatah23;05B2 +hatafpatah2f;05B2 +hatafpatahhebrew;05B2 +hatafpatahnarrowhebrew;05B2 +hatafpatahquarterhebrew;05B2 +hatafpatahwidehebrew;05B2 +hatafqamats;05B3 +hatafqamats1b;05B3 +hatafqamats28;05B3 +hatafqamats34;05B3 +hatafqamatshebrew;05B3 +hatafqamatsnarrowhebrew;05B3 +hatafqamatsquarterhebrew;05B3 +hatafqamatswidehebrew;05B3 +hatafsegol;05B1 +hatafsegol17;05B1 +hatafsegol24;05B1 +hatafsegol30;05B1 +hatafsegolhebrew;05B1 +hatafsegolnarrowhebrew;05B1 +hatafsegolquarterhebrew;05B1 +hatafsegolwidehebrew;05B1 +hbar;0127 +hbopomofo;310F +hbrevebelow;1E2B +hcedilla;1E29 +hcircle;24D7 +hcircumflex;0125 +hdieresis;1E27 +hdotaccent;1E23 +hdotbelow;1E25 +he;05D4 +heart;2665 +heartsuitblack;2665 +heartsuitwhite;2661 +hedagesh;FB34 +hedageshhebrew;FB34 +hehaltonearabic;06C1 +heharabic;0647 +hehebrew;05D4 +hehfinalaltonearabic;FBA7 +hehfinalalttwoarabic;FEEA +hehfinalarabic;FEEA +hehhamzaabovefinalarabic;FBA5 +hehhamzaaboveisolatedarabic;FBA4 +hehinitialaltonearabic;FBA8 +hehinitialarabic;FEEB +hehiragana;3078 +hehmedialaltonearabic;FBA9 +hehmedialarabic;FEEC +heiseierasquare;337B +hekatakana;30D8 +hekatakanahalfwidth;FF8D +hekutaarusquare;3336 +henghook;0267 +herutusquare;3339 +het;05D7 +hethebrew;05D7 +hhook;0266 +hhooksuperior;02B1 +hieuhacirclekorean;327B +hieuhaparenkorean;321B +hieuhcirclekorean;326D +hieuhkorean;314E +hieuhparenkorean;320D +hihiragana;3072 +hikatakana;30D2 +hikatakanahalfwidth;FF8B +hiriq;05B4 +hiriq14;05B4 +hiriq21;05B4 +hiriq2d;05B4 +hiriqhebrew;05B4 +hiriqnarrowhebrew;05B4 +hiriqquarterhebrew;05B4 +hiriqwidehebrew;05B4 +hlinebelow;1E96 +hmonospace;FF48 +hoarmenian;0570 +hohipthai;0E2B +hohiragana;307B +hokatakana;30DB +hokatakanahalfwidth;FF8E +holam;05B9 +holam19;05B9 +holam26;05B9 +holam32;05B9 +holamhebrew;05B9 +holamnarrowhebrew;05B9 +holamquarterhebrew;05B9 +holamwidehebrew;05B9 +honokhukthai;0E2E +hookabovecomb;0309 +hookcmb;0309 +hookpalatalizedbelowcmb;0321 +hookretroflexbelowcmb;0322 +hoonsquare;3342 +horicoptic;03E9 +horizontalbar;2015 +horncmb;031B +hotsprings;2668 +house;2302 +hparen;24A3 +hsuperior;02B0 +hturned;0265 +huhiragana;3075 +huiitosquare;3333 
+hukatakana;30D5 +hukatakanahalfwidth;FF8C +hungarumlaut;02DD +hungarumlautcmb;030B +hv;0195 +hyphen;002D +hypheninferior;F6E5 +hyphenmonospace;FF0D +hyphensmall;FE63 +hyphensuperior;F6E6 +hyphentwo;2010 +i;0069 +iacute;00ED +iacyrillic;044F +ibengali;0987 +ibopomofo;3127 +ibreve;012D +icaron;01D0 +icircle;24D8 +icircumflex;00EE +icyrillic;0456 +idblgrave;0209 +ideographearthcircle;328F +ideographfirecircle;328B +ideographicallianceparen;323F +ideographiccallparen;323A +ideographiccentrecircle;32A5 +ideographicclose;3006 +ideographiccomma;3001 +ideographiccommaleft;FF64 +ideographiccongratulationparen;3237 +ideographiccorrectcircle;32A3 +ideographicearthparen;322F +ideographicenterpriseparen;323D +ideographicexcellentcircle;329D +ideographicfestivalparen;3240 +ideographicfinancialcircle;3296 +ideographicfinancialparen;3236 +ideographicfireparen;322B +ideographichaveparen;3232 +ideographichighcircle;32A4 +ideographiciterationmark;3005 +ideographiclaborcircle;3298 +ideographiclaborparen;3238 +ideographicleftcircle;32A7 +ideographiclowcircle;32A6 +ideographicmedicinecircle;32A9 +ideographicmetalparen;322E +ideographicmoonparen;322A +ideographicnameparen;3234 +ideographicperiod;3002 +ideographicprintcircle;329E +ideographicreachparen;3243 +ideographicrepresentparen;3239 +ideographicresourceparen;323E +ideographicrightcircle;32A8 +ideographicsecretcircle;3299 +ideographicselfparen;3242 +ideographicsocietyparen;3233 +ideographicspace;3000 +ideographicspecialparen;3235 +ideographicstockparen;3231 +ideographicstudyparen;323B +ideographicsunparen;3230 +ideographicsuperviseparen;323C +ideographicwaterparen;322C +ideographicwoodparen;322D +ideographiczero;3007 +ideographmetalcircle;328E +ideographmooncircle;328A +ideographnamecircle;3294 +ideographsuncircle;3290 +ideographwatercircle;328C +ideographwoodcircle;328D +ideva;0907 +idieresis;00EF +idieresisacute;1E2F +idieresiscyrillic;04E5 +idotbelow;1ECB +iebrevecyrillic;04D7 +iecyrillic;0435 +ieungacirclekorean;3275 +ieungaparenkorean;3215 +ieungcirclekorean;3267 +ieungkorean;3147 +ieungparenkorean;3207 +igrave;00EC +igujarati;0A87 +igurmukhi;0A07 +ihiragana;3044 +ihookabove;1EC9 +iibengali;0988 +iicyrillic;0438 +iideva;0908 +iigujarati;0A88 +iigurmukhi;0A08 +iimatragurmukhi;0A40 +iinvertedbreve;020B +iishortcyrillic;0439 +iivowelsignbengali;09C0 +iivowelsigndeva;0940 +iivowelsigngujarati;0AC0 +ij;0133 +ikatakana;30A4 +ikatakanahalfwidth;FF72 +ikorean;3163 +ilde;02DC +iluyhebrew;05AC +imacron;012B +imacroncyrillic;04E3 +imageorapproximatelyequal;2253 +imatragurmukhi;0A3F +imonospace;FF49 +increment;2206 +infinity;221E +iniarmenian;056B +integral;222B +integralbottom;2321 +integralbt;2321 +integralex;F8F5 +integraltop;2320 +integraltp;2320 +intersection;2229 +intisquare;3305 +invbullet;25D8 +invcircle;25D9 +invsmileface;263B +iocyrillic;0451 +iogonek;012F +iota;03B9 +iotadieresis;03CA +iotadieresistonos;0390 +iotalatin;0269 +iotatonos;03AF +iparen;24A4 +irigurmukhi;0A72 +ismallhiragana;3043 +ismallkatakana;30A3 +ismallkatakanahalfwidth;FF68 +issharbengali;09FA +istroke;0268 +isuperior;F6ED +iterationhiragana;309D +iterationkatakana;30FD +itilde;0129 +itildebelow;1E2D +iubopomofo;3129 +iucyrillic;044E +ivowelsignbengali;09BF +ivowelsigndeva;093F +ivowelsigngujarati;0ABF +izhitsacyrillic;0475 +izhitsadblgravecyrillic;0477 +j;006A +jaarmenian;0571 +jabengali;099C +jadeva;091C +jagujarati;0A9C +jagurmukhi;0A1C +jbopomofo;3110 +jcaron;01F0 +jcircle;24D9 +jcircumflex;0135 +jcrossedtail;029D +jdotlessstroke;025F +jecyrillic;0458 +jeemarabic;062C 
+jeemfinalarabic;FE9E +jeeminitialarabic;FE9F +jeemmedialarabic;FEA0 +jeharabic;0698 +jehfinalarabic;FB8B +jhabengali;099D +jhadeva;091D +jhagujarati;0A9D +jhagurmukhi;0A1D +jheharmenian;057B +jis;3004 +jmonospace;FF4A +jparen;24A5 +jsuperior;02B2 +k;006B +kabashkircyrillic;04A1 +kabengali;0995 +kacute;1E31 +kacyrillic;043A +kadescendercyrillic;049B +kadeva;0915 +kaf;05DB +kafarabic;0643 +kafdagesh;FB3B +kafdageshhebrew;FB3B +kaffinalarabic;FEDA +kafhebrew;05DB +kafinitialarabic;FEDB +kafmedialarabic;FEDC +kafrafehebrew;FB4D +kagujarati;0A95 +kagurmukhi;0A15 +kahiragana;304B +kahookcyrillic;04C4 +kakatakana;30AB +kakatakanahalfwidth;FF76 +kappa;03BA +kappasymbolgreek;03F0 +kapyeounmieumkorean;3171 +kapyeounphieuphkorean;3184 +kapyeounpieupkorean;3178 +kapyeounssangpieupkorean;3179 +karoriisquare;330D +kashidaautoarabic;0640 +kashidaautonosidebearingarabic;0640 +kasmallkatakana;30F5 +kasquare;3384 +kasraarabic;0650 +kasratanarabic;064D +kastrokecyrillic;049F +katahiraprolongmarkhalfwidth;FF70 +kaverticalstrokecyrillic;049D +kbopomofo;310E +kcalsquare;3389 +kcaron;01E9 +kcedilla;0137 +kcircle;24DA +kcommaaccent;0137 +kdotbelow;1E33 +keharmenian;0584 +kehiragana;3051 +kekatakana;30B1 +kekatakanahalfwidth;FF79 +kenarmenian;056F +kesmallkatakana;30F6 +kgreenlandic;0138 +khabengali;0996 +khacyrillic;0445 +khadeva;0916 +khagujarati;0A96 +khagurmukhi;0A16 +khaharabic;062E +khahfinalarabic;FEA6 +khahinitialarabic;FEA7 +khahmedialarabic;FEA8 +kheicoptic;03E7 +khhadeva;0959 +khhagurmukhi;0A59 +khieukhacirclekorean;3278 +khieukhaparenkorean;3218 +khieukhcirclekorean;326A +khieukhkorean;314B +khieukhparenkorean;320A +khokhaithai;0E02 +khokhonthai;0E05 +khokhuatthai;0E03 +khokhwaithai;0E04 +khomutthai;0E5B +khook;0199 +khorakhangthai;0E06 +khzsquare;3391 +kihiragana;304D +kikatakana;30AD +kikatakanahalfwidth;FF77 +kiroguramusquare;3315 +kiromeetorusquare;3316 +kirosquare;3314 +kiyeokacirclekorean;326E +kiyeokaparenkorean;320E +kiyeokcirclekorean;3260 +kiyeokkorean;3131 +kiyeokparenkorean;3200 +kiyeoksioskorean;3133 +kjecyrillic;045C +klinebelow;1E35 +klsquare;3398 +kmcubedsquare;33A6 +kmonospace;FF4B +kmsquaredsquare;33A2 +kohiragana;3053 +kohmsquare;33C0 +kokaithai;0E01 +kokatakana;30B3 +kokatakanahalfwidth;FF7A +kooposquare;331E +koppacyrillic;0481 +koreanstandardsymbol;327F +koroniscmb;0343 +kparen;24A6 +kpasquare;33AA +ksicyrillic;046F +ktsquare;33CF +kturned;029E +kuhiragana;304F +kukatakana;30AF +kukatakanahalfwidth;FF78 +kvsquare;33B8 +kwsquare;33BE +l;006C +labengali;09B2 +lacute;013A +ladeva;0932 +lagujarati;0AB2 +lagurmukhi;0A32 +lakkhangyaothai;0E45 +lamaleffinalarabic;FEFC +lamalefhamzaabovefinalarabic;FEF8 +lamalefhamzaaboveisolatedarabic;FEF7 +lamalefhamzabelowfinalarabic;FEFA +lamalefhamzabelowisolatedarabic;FEF9 +lamalefisolatedarabic;FEFB +lamalefmaddaabovefinalarabic;FEF6 +lamalefmaddaaboveisolatedarabic;FEF5 +lamarabic;0644 +lambda;03BB +lambdastroke;019B +lamed;05DC +lameddagesh;FB3C +lameddageshhebrew;FB3C +lamedhebrew;05DC +lamedholam;05DC 05B9 +lamedholamdagesh;05DC 05B9 05BC +lamedholamdageshhebrew;05DC 05B9 05BC +lamedholamhebrew;05DC 05B9 +lamfinalarabic;FEDE +lamhahinitialarabic;FCCA +laminitialarabic;FEDF +lamjeeminitialarabic;FCC9 +lamkhahinitialarabic;FCCB +lamlamhehisolatedarabic;FDF2 +lammedialarabic;FEE0 +lammeemhahinitialarabic;FD88 +lammeeminitialarabic;FCCC +lammeemjeeminitialarabic;FEDF FEE4 FEA0 +lammeemkhahinitialarabic;FEDF FEE4 FEA8 +largecircle;25EF +lbar;019A +lbelt;026C +lbopomofo;310C +lcaron;013E +lcedilla;013C +lcircle;24DB +lcircumflexbelow;1E3D 
+lcommaaccent;013C +ldot;0140 +ldotaccent;0140 +ldotbelow;1E37 +ldotbelowmacron;1E39 +leftangleabovecmb;031A +lefttackbelowcmb;0318 +less;003C +lessequal;2264 +lessequalorgreater;22DA +lessmonospace;FF1C +lessorequivalent;2272 +lessorgreater;2276 +lessoverequal;2266 +lesssmall;FE64 +lezh;026E +lfblock;258C +lhookretroflex;026D +lira;20A4 +liwnarmenian;056C +lj;01C9 +ljecyrillic;0459 +ll;F6C0 +lladeva;0933 +llagujarati;0AB3 +llinebelow;1E3B +llladeva;0934 +llvocalicbengali;09E1 +llvocalicdeva;0961 +llvocalicvowelsignbengali;09E3 +llvocalicvowelsigndeva;0963 +lmiddletilde;026B +lmonospace;FF4C +lmsquare;33D0 +lochulathai;0E2C +logicaland;2227 +logicalnot;00AC +logicalnotreversed;2310 +logicalor;2228 +lolingthai;0E25 +longs;017F +lowlinecenterline;FE4E +lowlinecmb;0332 +lowlinedashed;FE4D +lozenge;25CA +lparen;24A7 +lslash;0142 +lsquare;2113 +lsuperior;F6EE +ltshade;2591 +luthai;0E26 +lvocalicbengali;098C +lvocalicdeva;090C +lvocalicvowelsignbengali;09E2 +lvocalicvowelsigndeva;0962 +lxsquare;33D3 +m;006D +mabengali;09AE +macron;00AF +macronbelowcmb;0331 +macroncmb;0304 +macronlowmod;02CD +macronmonospace;FFE3 +macute;1E3F +madeva;092E +magujarati;0AAE +magurmukhi;0A2E +mahapakhhebrew;05A4 +mahapakhlefthebrew;05A4 +mahiragana;307E +maichattawalowleftthai;F895 +maichattawalowrightthai;F894 +maichattawathai;0E4B +maichattawaupperleftthai;F893 +maieklowleftthai;F88C +maieklowrightthai;F88B +maiekthai;0E48 +maiekupperleftthai;F88A +maihanakatleftthai;F884 +maihanakatthai;0E31 +maitaikhuleftthai;F889 +maitaikhuthai;0E47 +maitholowleftthai;F88F +maitholowrightthai;F88E +maithothai;0E49 +maithoupperleftthai;F88D +maitrilowleftthai;F892 +maitrilowrightthai;F891 +maitrithai;0E4A +maitriupperleftthai;F890 +maiyamokthai;0E46 +makatakana;30DE +makatakanahalfwidth;FF8F +male;2642 +mansyonsquare;3347 +maqafhebrew;05BE +mars;2642 +masoracirclehebrew;05AF +masquare;3383 +mbopomofo;3107 +mbsquare;33D4 +mcircle;24DC +mcubedsquare;33A5 +mdotaccent;1E41 +mdotbelow;1E43 +meemarabic;0645 +meemfinalarabic;FEE2 +meeminitialarabic;FEE3 +meemmedialarabic;FEE4 +meemmeeminitialarabic;FCD1 +meemmeemisolatedarabic;FC48 +meetorusquare;334D +mehiragana;3081 +meizierasquare;337E +mekatakana;30E1 +mekatakanahalfwidth;FF92 +mem;05DE +memdagesh;FB3E +memdageshhebrew;FB3E +memhebrew;05DE +menarmenian;0574 +merkhahebrew;05A5 +merkhakefulahebrew;05A6 +merkhakefulalefthebrew;05A6 +merkhalefthebrew;05A5 +mhook;0271 +mhzsquare;3392 +middledotkatakanahalfwidth;FF65 +middot;00B7 +mieumacirclekorean;3272 +mieumaparenkorean;3212 +mieumcirclekorean;3264 +mieumkorean;3141 +mieumpansioskorean;3170 +mieumparenkorean;3204 +mieumpieupkorean;316E +mieumsioskorean;316F +mihiragana;307F +mikatakana;30DF +mikatakanahalfwidth;FF90 +minus;2212 +minusbelowcmb;0320 +minuscircle;2296 +minusmod;02D7 +minusplus;2213 +minute;2032 +miribaarusquare;334A +mirisquare;3349 +mlonglegturned;0270 +mlsquare;3396 +mmcubedsquare;33A3 +mmonospace;FF4D +mmsquaredsquare;339F +mohiragana;3082 +mohmsquare;33C1 +mokatakana;30E2 +mokatakanahalfwidth;FF93 +molsquare;33D6 +momathai;0E21 +moverssquare;33A7 +moverssquaredsquare;33A8 +mparen;24A8 +mpasquare;33AB +mssquare;33B3 +msuperior;F6EF +mturned;026F +mu;00B5 +mu1;00B5 +muasquare;3382 +muchgreater;226B +muchless;226A +mufsquare;338C +mugreek;03BC +mugsquare;338D +muhiragana;3080 +mukatakana;30E0 +mukatakanahalfwidth;FF91 +mulsquare;3395 +multiply;00D7 +mumsquare;339B +munahhebrew;05A3 +munahlefthebrew;05A3 +musicalnote;266A +musicalnotedbl;266B +musicflatsign;266D +musicsharpsign;266F +mussquare;33B2 +muvsquare;33B6 
+muwsquare;33BC +mvmegasquare;33B9 +mvsquare;33B7 +mwmegasquare;33BF +mwsquare;33BD +n;006E +nabengali;09A8 +nabla;2207 +nacute;0144 +nadeva;0928 +nagujarati;0AA8 +nagurmukhi;0A28 +nahiragana;306A +nakatakana;30CA +nakatakanahalfwidth;FF85 +napostrophe;0149 +nasquare;3381 +nbopomofo;310B +nbspace;00A0 +ncaron;0148 +ncedilla;0146 +ncircle;24DD +ncircumflexbelow;1E4B +ncommaaccent;0146 +ndotaccent;1E45 +ndotbelow;1E47 +nehiragana;306D +nekatakana;30CD +nekatakanahalfwidth;FF88 +newsheqelsign;20AA +nfsquare;338B +ngabengali;0999 +ngadeva;0919 +ngagujarati;0A99 +ngagurmukhi;0A19 +ngonguthai;0E07 +nhiragana;3093 +nhookleft;0272 +nhookretroflex;0273 +nieunacirclekorean;326F +nieunaparenkorean;320F +nieuncieuckorean;3135 +nieuncirclekorean;3261 +nieunhieuhkorean;3136 +nieunkorean;3134 +nieunpansioskorean;3168 +nieunparenkorean;3201 +nieunsioskorean;3167 +nieuntikeutkorean;3166 +nihiragana;306B +nikatakana;30CB +nikatakanahalfwidth;FF86 +nikhahitleftthai;F899 +nikhahitthai;0E4D +nine;0039 +ninearabic;0669 +ninebengali;09EF +ninecircle;2468 +ninecircleinversesansserif;2792 +ninedeva;096F +ninegujarati;0AEF +ninegurmukhi;0A6F +ninehackarabic;0669 +ninehangzhou;3029 +nineideographicparen;3228 +nineinferior;2089 +ninemonospace;FF19 +nineoldstyle;F739 +nineparen;247C +nineperiod;2490 +ninepersian;06F9 +nineroman;2178 +ninesuperior;2079 +nineteencircle;2472 +nineteenparen;2486 +nineteenperiod;249A +ninethai;0E59 +nj;01CC +njecyrillic;045A +nkatakana;30F3 +nkatakanahalfwidth;FF9D +nlegrightlong;019E +nlinebelow;1E49 +nmonospace;FF4E +nmsquare;339A +nnabengali;09A3 +nnadeva;0923 +nnagujarati;0AA3 +nnagurmukhi;0A23 +nnnadeva;0929 +nohiragana;306E +nokatakana;30CE +nokatakanahalfwidth;FF89 +nonbreakingspace;00A0 +nonenthai;0E13 +nonuthai;0E19 +noonarabic;0646 +noonfinalarabic;FEE6 +noonghunnaarabic;06BA +noonghunnafinalarabic;FB9F +noonhehinitialarabic;FEE7 FEEC +nooninitialarabic;FEE7 +noonjeeminitialarabic;FCD2 +noonjeemisolatedarabic;FC4B +noonmedialarabic;FEE8 +noonmeeminitialarabic;FCD5 +noonmeemisolatedarabic;FC4E +noonnoonfinalarabic;FC8D +notcontains;220C +notelement;2209 +notelementof;2209 +notequal;2260 +notgreater;226F +notgreaternorequal;2271 +notgreaternorless;2279 +notidentical;2262 +notless;226E +notlessnorequal;2270 +notparallel;2226 +notprecedes;2280 +notsubset;2284 +notsucceeds;2281 +notsuperset;2285 +nowarmenian;0576 +nparen;24A9 +nssquare;33B1 +nsuperior;207F +ntilde;00F1 +nu;03BD +nuhiragana;306C +nukatakana;30CC +nukatakanahalfwidth;FF87 +nuktabengali;09BC +nuktadeva;093C +nuktagujarati;0ABC +nuktagurmukhi;0A3C +numbersign;0023 +numbersignmonospace;FF03 +numbersignsmall;FE5F +numeralsigngreek;0374 +numeralsignlowergreek;0375 +numero;2116 +nun;05E0 +nundagesh;FB40 +nundageshhebrew;FB40 +nunhebrew;05E0 +nvsquare;33B5 +nwsquare;33BB +nyabengali;099E +nyadeva;091E +nyagujarati;0A9E +nyagurmukhi;0A1E +o;006F +oacute;00F3 +oangthai;0E2D +obarred;0275 +obarredcyrillic;04E9 +obarreddieresiscyrillic;04EB +obengali;0993 +obopomofo;311B +obreve;014F +ocandradeva;0911 +ocandragujarati;0A91 +ocandravowelsigndeva;0949 +ocandravowelsigngujarati;0AC9 +ocaron;01D2 +ocircle;24DE +ocircumflex;00F4 +ocircumflexacute;1ED1 +ocircumflexdotbelow;1ED9 +ocircumflexgrave;1ED3 +ocircumflexhookabove;1ED5 +ocircumflextilde;1ED7 +ocyrillic;043E +odblacute;0151 +odblgrave;020D +odeva;0913 +odieresis;00F6 +odieresiscyrillic;04E7 +odotbelow;1ECD +oe;0153 +oekorean;315A +ogonek;02DB +ogonekcmb;0328 +ograve;00F2 +ogujarati;0A93 +oharmenian;0585 +ohiragana;304A +ohookabove;1ECF +ohorn;01A1 +ohornacute;1EDB 
+ohorndotbelow;1EE3 +ohorngrave;1EDD +ohornhookabove;1EDF +ohorntilde;1EE1 +ohungarumlaut;0151 +oi;01A3 +oinvertedbreve;020F +okatakana;30AA +okatakanahalfwidth;FF75 +okorean;3157 +olehebrew;05AB +omacron;014D +omacronacute;1E53 +omacrongrave;1E51 +omdeva;0950 +omega;03C9 +omega1;03D6 +omegacyrillic;0461 +omegalatinclosed;0277 +omegaroundcyrillic;047B +omegatitlocyrillic;047D +omegatonos;03CE +omgujarati;0AD0 +omicron;03BF +omicrontonos;03CC +omonospace;FF4F +one;0031 +onearabic;0661 +onebengali;09E7 +onecircle;2460 +onecircleinversesansserif;278A +onedeva;0967 +onedotenleader;2024 +oneeighth;215B +onefitted;F6DC +onegujarati;0AE7 +onegurmukhi;0A67 +onehackarabic;0661 +onehalf;00BD +onehangzhou;3021 +oneideographicparen;3220 +oneinferior;2081 +onemonospace;FF11 +onenumeratorbengali;09F4 +oneoldstyle;F731 +oneparen;2474 +oneperiod;2488 +onepersian;06F1 +onequarter;00BC +oneroman;2170 +onesuperior;00B9 +onethai;0E51 +onethird;2153 +oogonek;01EB +oogonekmacron;01ED +oogurmukhi;0A13 +oomatragurmukhi;0A4B +oopen;0254 +oparen;24AA +openbullet;25E6 +option;2325 +ordfeminine;00AA +ordmasculine;00BA +orthogonal;221F +oshortdeva;0912 +oshortvowelsigndeva;094A +oslash;00F8 +oslashacute;01FF +osmallhiragana;3049 +osmallkatakana;30A9 +osmallkatakanahalfwidth;FF6B +ostrokeacute;01FF +osuperior;F6F0 +otcyrillic;047F +otilde;00F5 +otildeacute;1E4D +otildedieresis;1E4F +oubopomofo;3121 +overline;203E +overlinecenterline;FE4A +overlinecmb;0305 +overlinedashed;FE49 +overlinedblwavy;FE4C +overlinewavy;FE4B +overscore;00AF +ovowelsignbengali;09CB +ovowelsigndeva;094B +ovowelsigngujarati;0ACB +p;0070 +paampssquare;3380 +paasentosquare;332B +pabengali;09AA +pacute;1E55 +padeva;092A +pagedown;21DF +pageup;21DE +pagujarati;0AAA +pagurmukhi;0A2A +pahiragana;3071 +paiyannoithai;0E2F +pakatakana;30D1 +palatalizationcyrilliccmb;0484 +palochkacyrillic;04C0 +pansioskorean;317F +paragraph;00B6 +parallel;2225 +parenleft;0028 +parenleftaltonearabic;FD3E +parenleftbt;F8ED +parenleftex;F8EC +parenleftinferior;208D +parenleftmonospace;FF08 +parenleftsmall;FE59 +parenleftsuperior;207D +parenlefttp;F8EB +parenleftvertical;FE35 +parenright;0029 +parenrightaltonearabic;FD3F +parenrightbt;F8F8 +parenrightex;F8F7 +parenrightinferior;208E +parenrightmonospace;FF09 +parenrightsmall;FE5A +parenrightsuperior;207E +parenrighttp;F8F6 +parenrightvertical;FE36 +partialdiff;2202 +paseqhebrew;05C0 +pashtahebrew;0599 +pasquare;33A9 +patah;05B7 +patah11;05B7 +patah1d;05B7 +patah2a;05B7 +patahhebrew;05B7 +patahnarrowhebrew;05B7 +patahquarterhebrew;05B7 +patahwidehebrew;05B7 +pazerhebrew;05A1 +pbopomofo;3106 +pcircle;24DF +pdotaccent;1E57 +pe;05E4 +pecyrillic;043F +pedagesh;FB44 +pedageshhebrew;FB44 +peezisquare;333B +pefinaldageshhebrew;FB43 +peharabic;067E +peharmenian;057A +pehebrew;05E4 +pehfinalarabic;FB57 +pehinitialarabic;FB58 +pehiragana;307A +pehmedialarabic;FB59 +pekatakana;30DA +pemiddlehookcyrillic;04A7 +perafehebrew;FB4E +percent;0025 +percentarabic;066A +percentmonospace;FF05 +percentsmall;FE6A +period;002E +periodarmenian;0589 +periodcentered;00B7 +periodhalfwidth;FF61 +periodinferior;F6E7 +periodmonospace;FF0E +periodsmall;FE52 +periodsuperior;F6E8 +perispomenigreekcmb;0342 +perpendicular;22A5 +perthousand;2030 +peseta;20A7 +pfsquare;338A +phabengali;09AB +phadeva;092B +phagujarati;0AAB +phagurmukhi;0A2B +phi;03C6 +phi1;03D5 +phieuphacirclekorean;327A +phieuphaparenkorean;321A +phieuphcirclekorean;326C +phieuphkorean;314D +phieuphparenkorean;320C +philatin;0278 +phinthuthai;0E3A +phisymbolgreek;03D5 +phook;01A5 +phophanthai;0E1E 
+phophungthai;0E1C +phosamphaothai;0E20 +pi;03C0 +pieupacirclekorean;3273 +pieupaparenkorean;3213 +pieupcieuckorean;3176 +pieupcirclekorean;3265 +pieupkiyeokkorean;3172 +pieupkorean;3142 +pieupparenkorean;3205 +pieupsioskiyeokkorean;3174 +pieupsioskorean;3144 +pieupsiostikeutkorean;3175 +pieupthieuthkorean;3177 +pieuptikeutkorean;3173 +pihiragana;3074 +pikatakana;30D4 +pisymbolgreek;03D6 +piwrarmenian;0583 +plus;002B +plusbelowcmb;031F +pluscircle;2295 +plusminus;00B1 +plusmod;02D6 +plusmonospace;FF0B +plussmall;FE62 +plussuperior;207A +pmonospace;FF50 +pmsquare;33D8 +pohiragana;307D +pointingindexdownwhite;261F +pointingindexleftwhite;261C +pointingindexrightwhite;261E +pointingindexupwhite;261D +pokatakana;30DD +poplathai;0E1B +postalmark;3012 +postalmarkface;3020 +pparen;24AB +precedes;227A +prescription;211E +primemod;02B9 +primereversed;2035 +product;220F +projective;2305 +prolongedkana;30FC +propellor;2318 +propersubset;2282 +propersuperset;2283 +proportion;2237 +proportional;221D +psi;03C8 +psicyrillic;0471 +psilipneumatacyrilliccmb;0486 +pssquare;33B0 +puhiragana;3077 +pukatakana;30D7 +pvsquare;33B4 +pwsquare;33BA +q;0071 +qadeva;0958 +qadmahebrew;05A8 +qafarabic;0642 +qaffinalarabic;FED6 +qafinitialarabic;FED7 +qafmedialarabic;FED8 +qamats;05B8 +qamats10;05B8 +qamats1a;05B8 +qamats1c;05B8 +qamats27;05B8 +qamats29;05B8 +qamats33;05B8 +qamatsde;05B8 +qamatshebrew;05B8 +qamatsnarrowhebrew;05B8 +qamatsqatanhebrew;05B8 +qamatsqatannarrowhebrew;05B8 +qamatsqatanquarterhebrew;05B8 +qamatsqatanwidehebrew;05B8 +qamatsquarterhebrew;05B8 +qamatswidehebrew;05B8 +qarneyparahebrew;059F +qbopomofo;3111 +qcircle;24E0 +qhook;02A0 +qmonospace;FF51 +qof;05E7 +qofdagesh;FB47 +qofdageshhebrew;FB47 +qofhatafpatah;05E7 05B2 +qofhatafpatahhebrew;05E7 05B2 +qofhatafsegol;05E7 05B1 +qofhatafsegolhebrew;05E7 05B1 +qofhebrew;05E7 +qofhiriq;05E7 05B4 +qofhiriqhebrew;05E7 05B4 +qofholam;05E7 05B9 +qofholamhebrew;05E7 05B9 +qofpatah;05E7 05B7 +qofpatahhebrew;05E7 05B7 +qofqamats;05E7 05B8 +qofqamatshebrew;05E7 05B8 +qofqubuts;05E7 05BB +qofqubutshebrew;05E7 05BB +qofsegol;05E7 05B6 +qofsegolhebrew;05E7 05B6 +qofsheva;05E7 05B0 +qofshevahebrew;05E7 05B0 +qoftsere;05E7 05B5 +qoftserehebrew;05E7 05B5 +qparen;24AC +quarternote;2669 +qubuts;05BB +qubuts18;05BB +qubuts25;05BB +qubuts31;05BB +qubutshebrew;05BB +qubutsnarrowhebrew;05BB +qubutsquarterhebrew;05BB +qubutswidehebrew;05BB +question;003F +questionarabic;061F +questionarmenian;055E +questiondown;00BF +questiondownsmall;F7BF +questiongreek;037E +questionmonospace;FF1F +questionsmall;F73F +quotedbl;0022 +quotedblbase;201E +quotedblleft;201C +quotedblmonospace;FF02 +quotedblprime;301E +quotedblprimereversed;301D +quotedblright;201D +quoteleft;2018 +quoteleftreversed;201B +quotereversed;201B +quoteright;2019 +quoterightn;0149 +quotesinglbase;201A +quotesingle;0027 +quotesinglemonospace;FF07 +r;0072 +raarmenian;057C +rabengali;09B0 +racute;0155 +radeva;0930 +radical;221A +radicalex;F8E5 +radoverssquare;33AE +radoverssquaredsquare;33AF +radsquare;33AD +rafe;05BF +rafehebrew;05BF +ragujarati;0AB0 +ragurmukhi;0A30 +rahiragana;3089 +rakatakana;30E9 +rakatakanahalfwidth;FF97 +ralowerdiagonalbengali;09F1 +ramiddlediagonalbengali;09F0 +ramshorn;0264 +ratio;2236 +rbopomofo;3116 +rcaron;0159 +rcedilla;0157 +rcircle;24E1 +rcommaaccent;0157 +rdblgrave;0211 +rdotaccent;1E59 +rdotbelow;1E5B +rdotbelowmacron;1E5D +referencemark;203B +reflexsubset;2286 +reflexsuperset;2287 +registered;00AE +registersans;F8E8 +registerserif;F6DA +reharabic;0631 +reharmenian;0580 
+rehfinalarabic;FEAE +rehiragana;308C +rehyehaleflamarabic;0631 FEF3 FE8E 0644 +rekatakana;30EC +rekatakanahalfwidth;FF9A +resh;05E8 +reshdageshhebrew;FB48 +reshhatafpatah;05E8 05B2 +reshhatafpatahhebrew;05E8 05B2 +reshhatafsegol;05E8 05B1 +reshhatafsegolhebrew;05E8 05B1 +reshhebrew;05E8 +reshhiriq;05E8 05B4 +reshhiriqhebrew;05E8 05B4 +reshholam;05E8 05B9 +reshholamhebrew;05E8 05B9 +reshpatah;05E8 05B7 +reshpatahhebrew;05E8 05B7 +reshqamats;05E8 05B8 +reshqamatshebrew;05E8 05B8 +reshqubuts;05E8 05BB +reshqubutshebrew;05E8 05BB +reshsegol;05E8 05B6 +reshsegolhebrew;05E8 05B6 +reshsheva;05E8 05B0 +reshshevahebrew;05E8 05B0 +reshtsere;05E8 05B5 +reshtserehebrew;05E8 05B5 +reversedtilde;223D +reviahebrew;0597 +reviamugrashhebrew;0597 +revlogicalnot;2310 +rfishhook;027E +rfishhookreversed;027F +rhabengali;09DD +rhadeva;095D +rho;03C1 +rhook;027D +rhookturned;027B +rhookturnedsuperior;02B5 +rhosymbolgreek;03F1 +rhotichookmod;02DE +rieulacirclekorean;3271 +rieulaparenkorean;3211 +rieulcirclekorean;3263 +rieulhieuhkorean;3140 +rieulkiyeokkorean;313A +rieulkiyeoksioskorean;3169 +rieulkorean;3139 +rieulmieumkorean;313B +rieulpansioskorean;316C +rieulparenkorean;3203 +rieulphieuphkorean;313F +rieulpieupkorean;313C +rieulpieupsioskorean;316B +rieulsioskorean;313D +rieulthieuthkorean;313E +rieultikeutkorean;316A +rieulyeorinhieuhkorean;316D +rightangle;221F +righttackbelowcmb;0319 +righttriangle;22BF +rihiragana;308A +rikatakana;30EA +rikatakanahalfwidth;FF98 +ring;02DA +ringbelowcmb;0325 +ringcmb;030A +ringhalfleft;02BF +ringhalfleftarmenian;0559 +ringhalfleftbelowcmb;031C +ringhalfleftcentered;02D3 +ringhalfright;02BE +ringhalfrightbelowcmb;0339 +ringhalfrightcentered;02D2 +rinvertedbreve;0213 +rittorusquare;3351 +rlinebelow;1E5F +rlongleg;027C +rlonglegturned;027A +rmonospace;FF52 +rohiragana;308D +rokatakana;30ED +rokatakanahalfwidth;FF9B +roruathai;0E23 +rparen;24AD +rrabengali;09DC +rradeva;0931 +rragurmukhi;0A5C +rreharabic;0691 +rrehfinalarabic;FB8D +rrvocalicbengali;09E0 +rrvocalicdeva;0960 +rrvocalicgujarati;0AE0 +rrvocalicvowelsignbengali;09C4 +rrvocalicvowelsigndeva;0944 +rrvocalicvowelsigngujarati;0AC4 +rsuperior;F6F1 +rtblock;2590 +rturned;0279 +rturnedsuperior;02B4 +ruhiragana;308B +rukatakana;30EB +rukatakanahalfwidth;FF99 +rupeemarkbengali;09F2 +rupeesignbengali;09F3 +rupiah;F6DD +ruthai;0E24 +rvocalicbengali;098B +rvocalicdeva;090B +rvocalicgujarati;0A8B +rvocalicvowelsignbengali;09C3 +rvocalicvowelsigndeva;0943 +rvocalicvowelsigngujarati;0AC3 +s;0073 +sabengali;09B8 +sacute;015B +sacutedotaccent;1E65 +sadarabic;0635 +sadeva;0938 +sadfinalarabic;FEBA +sadinitialarabic;FEBB +sadmedialarabic;FEBC +sagujarati;0AB8 +sagurmukhi;0A38 +sahiragana;3055 +sakatakana;30B5 +sakatakanahalfwidth;FF7B +sallallahoualayhewasallamarabic;FDFA +samekh;05E1 +samekhdagesh;FB41 +samekhdageshhebrew;FB41 +samekhhebrew;05E1 +saraaathai;0E32 +saraaethai;0E41 +saraaimaimalaithai;0E44 +saraaimaimuanthai;0E43 +saraamthai;0E33 +saraathai;0E30 +saraethai;0E40 +saraiileftthai;F886 +saraiithai;0E35 +saraileftthai;F885 +saraithai;0E34 +saraothai;0E42 +saraueeleftthai;F888 +saraueethai;0E37 +saraueleftthai;F887 +sarauethai;0E36 +sarauthai;0E38 +sarauuthai;0E39 +sbopomofo;3119 +scaron;0161 +scarondotaccent;1E67 +scedilla;015F +schwa;0259 +schwacyrillic;04D9 +schwadieresiscyrillic;04DB +schwahook;025A +scircle;24E2 +scircumflex;015D +scommaaccent;0219 +sdotaccent;1E61 +sdotbelow;1E63 +sdotbelowdotaccent;1E69 +seagullbelowcmb;033C +second;2033 +secondtonechinese;02CA +section;00A7 +seenarabic;0633 +seenfinalarabic;FEB2 
+seeninitialarabic;FEB3 +seenmedialarabic;FEB4 +segol;05B6 +segol13;05B6 +segol1f;05B6 +segol2c;05B6 +segolhebrew;05B6 +segolnarrowhebrew;05B6 +segolquarterhebrew;05B6 +segoltahebrew;0592 +segolwidehebrew;05B6 +seharmenian;057D +sehiragana;305B +sekatakana;30BB +sekatakanahalfwidth;FF7E +semicolon;003B +semicolonarabic;061B +semicolonmonospace;FF1B +semicolonsmall;FE54 +semivoicedmarkkana;309C +semivoicedmarkkanahalfwidth;FF9F +sentisquare;3322 +sentosquare;3323 +seven;0037 +sevenarabic;0667 +sevenbengali;09ED +sevencircle;2466 +sevencircleinversesansserif;2790 +sevendeva;096D +seveneighths;215E +sevengujarati;0AED +sevengurmukhi;0A6D +sevenhackarabic;0667 +sevenhangzhou;3027 +sevenideographicparen;3226 +seveninferior;2087 +sevenmonospace;FF17 +sevenoldstyle;F737 +sevenparen;247A +sevenperiod;248E +sevenpersian;06F7 +sevenroman;2176 +sevensuperior;2077 +seventeencircle;2470 +seventeenparen;2484 +seventeenperiod;2498 +seventhai;0E57 +sfthyphen;00AD +shaarmenian;0577 +shabengali;09B6 +shacyrillic;0448 +shaddaarabic;0651 +shaddadammaarabic;FC61 +shaddadammatanarabic;FC5E +shaddafathaarabic;FC60 +shaddafathatanarabic;0651 064B +shaddakasraarabic;FC62 +shaddakasratanarabic;FC5F +shade;2592 +shadedark;2593 +shadelight;2591 +shademedium;2592 +shadeva;0936 +shagujarati;0AB6 +shagurmukhi;0A36 +shalshelethebrew;0593 +shbopomofo;3115 +shchacyrillic;0449 +sheenarabic;0634 +sheenfinalarabic;FEB6 +sheeninitialarabic;FEB7 +sheenmedialarabic;FEB8 +sheicoptic;03E3 +sheqel;20AA +sheqelhebrew;20AA +sheva;05B0 +sheva115;05B0 +sheva15;05B0 +sheva22;05B0 +sheva2e;05B0 +shevahebrew;05B0 +shevanarrowhebrew;05B0 +shevaquarterhebrew;05B0 +shevawidehebrew;05B0 +shhacyrillic;04BB +shimacoptic;03ED +shin;05E9 +shindagesh;FB49 +shindageshhebrew;FB49 +shindageshshindot;FB2C +shindageshshindothebrew;FB2C +shindageshsindot;FB2D +shindageshsindothebrew;FB2D +shindothebrew;05C1 +shinhebrew;05E9 +shinshindot;FB2A +shinshindothebrew;FB2A +shinsindot;FB2B +shinsindothebrew;FB2B +shook;0282 +sigma;03C3 +sigma1;03C2 +sigmafinal;03C2 +sigmalunatesymbolgreek;03F2 +sihiragana;3057 +sikatakana;30B7 +sikatakanahalfwidth;FF7C +siluqhebrew;05BD +siluqlefthebrew;05BD +similar;223C +sindothebrew;05C2 +siosacirclekorean;3274 +siosaparenkorean;3214 +sioscieuckorean;317E +sioscirclekorean;3266 +sioskiyeokkorean;317A +sioskorean;3145 +siosnieunkorean;317B +siosparenkorean;3206 +siospieupkorean;317D +siostikeutkorean;317C +six;0036 +sixarabic;0666 +sixbengali;09EC +sixcircle;2465 +sixcircleinversesansserif;278F +sixdeva;096C +sixgujarati;0AEC +sixgurmukhi;0A6C +sixhackarabic;0666 +sixhangzhou;3026 +sixideographicparen;3225 +sixinferior;2086 +sixmonospace;FF16 +sixoldstyle;F736 +sixparen;2479 +sixperiod;248D +sixpersian;06F6 +sixroman;2175 +sixsuperior;2076 +sixteencircle;246F +sixteencurrencydenominatorbengali;09F9 +sixteenparen;2483 +sixteenperiod;2497 +sixthai;0E56 +slash;002F +slashmonospace;FF0F +slong;017F +slongdotaccent;1E9B +smileface;263A +smonospace;FF53 +sofpasuqhebrew;05C3 +softhyphen;00AD +softsigncyrillic;044C +sohiragana;305D +sokatakana;30BD +sokatakanahalfwidth;FF7F +soliduslongoverlaycmb;0338 +solidusshortoverlaycmb;0337 +sorusithai;0E29 +sosalathai;0E28 +sosothai;0E0B +sosuathai;0E2A +space;0020 +spacehackarabic;0020 +spade;2660 +spadesuitblack;2660 +spadesuitwhite;2664 +sparen;24AE +squarebelowcmb;033B +squarecc;33C4 +squarecm;339D +squarediagonalcrosshatchfill;25A9 +squarehorizontalfill;25A4 +squarekg;338F +squarekm;339E +squarekmcapital;33CE +squareln;33D1 +squarelog;33D2 +squaremg;338E +squaremil;33D5 +squaremm;339C 
+squaremsquared;33A1 +squareorthogonalcrosshatchfill;25A6 +squareupperlefttolowerrightfill;25A7 +squareupperrighttolowerleftfill;25A8 +squareverticalfill;25A5 +squarewhitewithsmallblack;25A3 +srsquare;33DB +ssabengali;09B7 +ssadeva;0937 +ssagujarati;0AB7 +ssangcieuckorean;3149 +ssanghieuhkorean;3185 +ssangieungkorean;3180 +ssangkiyeokkorean;3132 +ssangnieunkorean;3165 +ssangpieupkorean;3143 +ssangsioskorean;3146 +ssangtikeutkorean;3138 +ssuperior;F6F2 +sterling;00A3 +sterlingmonospace;FFE1 +strokelongoverlaycmb;0336 +strokeshortoverlaycmb;0335 +subset;2282 +subsetnotequal;228A +subsetorequal;2286 +succeeds;227B +suchthat;220B +suhiragana;3059 +sukatakana;30B9 +sukatakanahalfwidth;FF7D +sukunarabic;0652 +summation;2211 +sun;263C +superset;2283 +supersetnotequal;228B +supersetorequal;2287 +svsquare;33DC +syouwaerasquare;337C +t;0074 +tabengali;09A4 +tackdown;22A4 +tackleft;22A3 +tadeva;0924 +tagujarati;0AA4 +tagurmukhi;0A24 +taharabic;0637 +tahfinalarabic;FEC2 +tahinitialarabic;FEC3 +tahiragana;305F +tahmedialarabic;FEC4 +taisyouerasquare;337D +takatakana;30BF +takatakanahalfwidth;FF80 +tatweelarabic;0640 +tau;03C4 +tav;05EA +tavdages;FB4A +tavdagesh;FB4A +tavdageshhebrew;FB4A +tavhebrew;05EA +tbar;0167 +tbopomofo;310A +tcaron;0165 +tccurl;02A8 +tcedilla;0163 +tcheharabic;0686 +tchehfinalarabic;FB7B +tchehinitialarabic;FB7C +tchehmedialarabic;FB7D +tchehmeeminitialarabic;FB7C FEE4 +tcircle;24E3 +tcircumflexbelow;1E71 +tcommaaccent;0163 +tdieresis;1E97 +tdotaccent;1E6B +tdotbelow;1E6D +tecyrillic;0442 +tedescendercyrillic;04AD +teharabic;062A +tehfinalarabic;FE96 +tehhahinitialarabic;FCA2 +tehhahisolatedarabic;FC0C +tehinitialarabic;FE97 +tehiragana;3066 +tehjeeminitialarabic;FCA1 +tehjeemisolatedarabic;FC0B +tehmarbutaarabic;0629 +tehmarbutafinalarabic;FE94 +tehmedialarabic;FE98 +tehmeeminitialarabic;FCA4 +tehmeemisolatedarabic;FC0E +tehnoonfinalarabic;FC73 +tekatakana;30C6 +tekatakanahalfwidth;FF83 +telephone;2121 +telephoneblack;260E +telishagedolahebrew;05A0 +telishaqetanahebrew;05A9 +tencircle;2469 +tenideographicparen;3229 +tenparen;247D +tenperiod;2491 +tenroman;2179 +tesh;02A7 +tet;05D8 +tetdagesh;FB38 +tetdageshhebrew;FB38 +tethebrew;05D8 +tetsecyrillic;04B5 +tevirhebrew;059B +tevirlefthebrew;059B +thabengali;09A5 +thadeva;0925 +thagujarati;0AA5 +thagurmukhi;0A25 +thalarabic;0630 +thalfinalarabic;FEAC +thanthakhatlowleftthai;F898 +thanthakhatlowrightthai;F897 +thanthakhatthai;0E4C +thanthakhatupperleftthai;F896 +theharabic;062B +thehfinalarabic;FE9A +thehinitialarabic;FE9B +thehmedialarabic;FE9C +thereexists;2203 +therefore;2234 +theta;03B8 +theta1;03D1 +thetasymbolgreek;03D1 +thieuthacirclekorean;3279 +thieuthaparenkorean;3219 +thieuthcirclekorean;326B +thieuthkorean;314C +thieuthparenkorean;320B +thirteencircle;246C +thirteenparen;2480 +thirteenperiod;2494 +thonangmonthothai;0E11 +thook;01AD +thophuthaothai;0E12 +thorn;00FE +thothahanthai;0E17 +thothanthai;0E10 +thothongthai;0E18 +thothungthai;0E16 +thousandcyrillic;0482 +thousandsseparatorarabic;066C +thousandsseparatorpersian;066C +three;0033 +threearabic;0663 +threebengali;09E9 +threecircle;2462 +threecircleinversesansserif;278C +threedeva;0969 +threeeighths;215C +threegujarati;0AE9 +threegurmukhi;0A69 +threehackarabic;0663 +threehangzhou;3023 +threeideographicparen;3222 +threeinferior;2083 +threemonospace;FF13 +threenumeratorbengali;09F6 +threeoldstyle;F733 +threeparen;2476 +threeperiod;248A +threepersian;06F3 +threequarters;00BE +threequartersemdash;F6DE +threeroman;2172 +threesuperior;00B3 +threethai;0E53 +thzsquare;3394 
+tihiragana;3061 +tikatakana;30C1 +tikatakanahalfwidth;FF81 +tikeutacirclekorean;3270 +tikeutaparenkorean;3210 +tikeutcirclekorean;3262 +tikeutkorean;3137 +tikeutparenkorean;3202 +tilde;02DC +tildebelowcmb;0330 +tildecmb;0303 +tildecomb;0303 +tildedoublecmb;0360 +tildeoperator;223C +tildeoverlaycmb;0334 +tildeverticalcmb;033E +timescircle;2297 +tipehahebrew;0596 +tipehalefthebrew;0596 +tippigurmukhi;0A70 +titlocyrilliccmb;0483 +tiwnarmenian;057F +tlinebelow;1E6F +tmonospace;FF54 +toarmenian;0569 +tohiragana;3068 +tokatakana;30C8 +tokatakanahalfwidth;FF84 +tonebarextrahighmod;02E5 +tonebarextralowmod;02E9 +tonebarhighmod;02E6 +tonebarlowmod;02E8 +tonebarmidmod;02E7 +tonefive;01BD +tonesix;0185 +tonetwo;01A8 +tonos;0384 +tonsquare;3327 +topatakthai;0E0F +tortoiseshellbracketleft;3014 +tortoiseshellbracketleftsmall;FE5D +tortoiseshellbracketleftvertical;FE39 +tortoiseshellbracketright;3015 +tortoiseshellbracketrightsmall;FE5E +tortoiseshellbracketrightvertical;FE3A +totaothai;0E15 +tpalatalhook;01AB +tparen;24AF +trademark;2122 +trademarksans;F8EA +trademarkserif;F6DB +tretroflexhook;0288 +triagdn;25BC +triaglf;25C4 +triagrt;25BA +triagup;25B2 +ts;02A6 +tsadi;05E6 +tsadidagesh;FB46 +tsadidageshhebrew;FB46 +tsadihebrew;05E6 +tsecyrillic;0446 +tsere;05B5 +tsere12;05B5 +tsere1e;05B5 +tsere2b;05B5 +tserehebrew;05B5 +tserenarrowhebrew;05B5 +tserequarterhebrew;05B5 +tserewidehebrew;05B5 +tshecyrillic;045B +tsuperior;F6F3 +ttabengali;099F +ttadeva;091F +ttagujarati;0A9F +ttagurmukhi;0A1F +tteharabic;0679 +ttehfinalarabic;FB67 +ttehinitialarabic;FB68 +ttehmedialarabic;FB69 +tthabengali;09A0 +tthadeva;0920 +tthagujarati;0AA0 +tthagurmukhi;0A20 +tturned;0287 +tuhiragana;3064 +tukatakana;30C4 +tukatakanahalfwidth;FF82 +tusmallhiragana;3063 +tusmallkatakana;30C3 +tusmallkatakanahalfwidth;FF6F +twelvecircle;246B +twelveparen;247F +twelveperiod;2493 +twelveroman;217B +twentycircle;2473 +twentyhangzhou;5344 +twentyparen;2487 +twentyperiod;249B +two;0032 +twoarabic;0662 +twobengali;09E8 +twocircle;2461 +twocircleinversesansserif;278B +twodeva;0968 +twodotenleader;2025 +twodotleader;2025 +twodotleadervertical;FE30 +twogujarati;0AE8 +twogurmukhi;0A68 +twohackarabic;0662 +twohangzhou;3022 +twoideographicparen;3221 +twoinferior;2082 +twomonospace;FF12 +twonumeratorbengali;09F5 +twooldstyle;F732 +twoparen;2475 +twoperiod;2489 +twopersian;06F2 +tworoman;2171 +twostroke;01BB +twosuperior;00B2 +twothai;0E52 +twothirds;2154 +u;0075 +uacute;00FA +ubar;0289 +ubengali;0989 +ubopomofo;3128 +ubreve;016D +ucaron;01D4 +ucircle;24E4 +ucircumflex;00FB +ucircumflexbelow;1E77 +ucyrillic;0443 +udattadeva;0951 +udblacute;0171 +udblgrave;0215 +udeva;0909 +udieresis;00FC +udieresisacute;01D8 +udieresisbelow;1E73 +udieresiscaron;01DA +udieresiscyrillic;04F1 +udieresisgrave;01DC +udieresismacron;01D6 +udotbelow;1EE5 +ugrave;00F9 +ugujarati;0A89 +ugurmukhi;0A09 +uhiragana;3046 +uhookabove;1EE7 +uhorn;01B0 +uhornacute;1EE9 +uhorndotbelow;1EF1 +uhorngrave;1EEB +uhornhookabove;1EED +uhorntilde;1EEF +uhungarumlaut;0171 +uhungarumlautcyrillic;04F3 +uinvertedbreve;0217 +ukatakana;30A6 +ukatakanahalfwidth;FF73 +ukcyrillic;0479 +ukorean;315C +umacron;016B +umacroncyrillic;04EF +umacrondieresis;1E7B +umatragurmukhi;0A41 +umonospace;FF55 +underscore;005F +underscoredbl;2017 +underscoremonospace;FF3F +underscorevertical;FE33 +underscorewavy;FE4F +union;222A +universal;2200 +uogonek;0173 +uparen;24B0 +upblock;2580 +upperdothebrew;05C4 +upsilon;03C5 +upsilondieresis;03CB +upsilondieresistonos;03B0 +upsilonlatin;028A +upsilontonos;03CD 
+uptackbelowcmb;031D +uptackmod;02D4 +uragurmukhi;0A73 +uring;016F +ushortcyrillic;045E +usmallhiragana;3045 +usmallkatakana;30A5 +usmallkatakanahalfwidth;FF69 +ustraightcyrillic;04AF +ustraightstrokecyrillic;04B1 +utilde;0169 +utildeacute;1E79 +utildebelow;1E75 +uubengali;098A +uudeva;090A +uugujarati;0A8A +uugurmukhi;0A0A +uumatragurmukhi;0A42 +uuvowelsignbengali;09C2 +uuvowelsigndeva;0942 +uuvowelsigngujarati;0AC2 +uvowelsignbengali;09C1 +uvowelsigndeva;0941 +uvowelsigngujarati;0AC1 +v;0076 +vadeva;0935 +vagujarati;0AB5 +vagurmukhi;0A35 +vakatakana;30F7 +vav;05D5 +vavdagesh;FB35 +vavdagesh65;FB35 +vavdageshhebrew;FB35 +vavhebrew;05D5 +vavholam;FB4B +vavholamhebrew;FB4B +vavvavhebrew;05F0 +vavyodhebrew;05F1 +vcircle;24E5 +vdotbelow;1E7F +vecyrillic;0432 +veharabic;06A4 +vehfinalarabic;FB6B +vehinitialarabic;FB6C +vehmedialarabic;FB6D +vekatakana;30F9 +venus;2640 +verticalbar;007C +verticallineabovecmb;030D +verticallinebelowcmb;0329 +verticallinelowmod;02CC +verticallinemod;02C8 +vewarmenian;057E +vhook;028B +vikatakana;30F8 +viramabengali;09CD +viramadeva;094D +viramagujarati;0ACD +visargabengali;0983 +visargadeva;0903 +visargagujarati;0A83 +vmonospace;FF56 +voarmenian;0578 +voicediterationhiragana;309E +voicediterationkatakana;30FE +voicedmarkkana;309B +voicedmarkkanahalfwidth;FF9E +vokatakana;30FA +vparen;24B1 +vtilde;1E7D +vturned;028C +vuhiragana;3094 +vukatakana;30F4 +w;0077 +wacute;1E83 +waekorean;3159 +wahiragana;308F +wakatakana;30EF +wakatakanahalfwidth;FF9C +wakorean;3158 +wasmallhiragana;308E +wasmallkatakana;30EE +wattosquare;3357 +wavedash;301C +wavyunderscorevertical;FE34 +wawarabic;0648 +wawfinalarabic;FEEE +wawhamzaabovearabic;0624 +wawhamzaabovefinalarabic;FE86 +wbsquare;33DD +wcircle;24E6 +wcircumflex;0175 +wdieresis;1E85 +wdotaccent;1E87 +wdotbelow;1E89 +wehiragana;3091 +weierstrass;2118 +wekatakana;30F1 +wekorean;315E +weokorean;315D +wgrave;1E81 +whitebullet;25E6 +whitecircle;25CB +whitecircleinverse;25D9 +whitecornerbracketleft;300E +whitecornerbracketleftvertical;FE43 +whitecornerbracketright;300F +whitecornerbracketrightvertical;FE44 +whitediamond;25C7 +whitediamondcontainingblacksmalldiamond;25C8 +whitedownpointingsmalltriangle;25BF +whitedownpointingtriangle;25BD +whiteleftpointingsmalltriangle;25C3 +whiteleftpointingtriangle;25C1 +whitelenticularbracketleft;3016 +whitelenticularbracketright;3017 +whiterightpointingsmalltriangle;25B9 +whiterightpointingtriangle;25B7 +whitesmallsquare;25AB +whitesmilingface;263A +whitesquare;25A1 +whitestar;2606 +whitetelephone;260F +whitetortoiseshellbracketleft;3018 +whitetortoiseshellbracketright;3019 +whiteuppointingsmalltriangle;25B5 +whiteuppointingtriangle;25B3 +wihiragana;3090 +wikatakana;30F0 +wikorean;315F +wmonospace;FF57 +wohiragana;3092 +wokatakana;30F2 +wokatakanahalfwidth;FF66 +won;20A9 +wonmonospace;FFE6 +wowaenthai;0E27 +wparen;24B2 +wring;1E98 +wsuperior;02B7 +wturned;028D +wynn;01BF +x;0078 +xabovecmb;033D +xbopomofo;3112 +xcircle;24E7 +xdieresis;1E8D +xdotaccent;1E8B +xeharmenian;056D +xi;03BE +xmonospace;FF58 +xparen;24B3 +xsuperior;02E3 +y;0079 +yaadosquare;334E +yabengali;09AF +yacute;00FD +yadeva;092F +yaekorean;3152 +yagujarati;0AAF +yagurmukhi;0A2F +yahiragana;3084 +yakatakana;30E4 +yakatakanahalfwidth;FF94 +yakorean;3151 +yamakkanthai;0E4E +yasmallhiragana;3083 +yasmallkatakana;30E3 +yasmallkatakanahalfwidth;FF6C +yatcyrillic;0463 +ycircle;24E8 +ycircumflex;0177 +ydieresis;00FF +ydotaccent;1E8F +ydotbelow;1EF5 +yeharabic;064A +yehbarreearabic;06D2 +yehbarreefinalarabic;FBAF +yehfinalarabic;FEF2 
+yehhamzaabovearabic;0626
+yehhamzaabovefinalarabic;FE8A
+yehhamzaaboveinitialarabic;FE8B
+yehhamzaabovemedialarabic;FE8C
+yehinitialarabic;FEF3
+yehmedialarabic;FEF4
+yehmeeminitialarabic;FCDD
+yehmeemisolatedarabic;FC58
+yehnoonfinalarabic;FC94
+yehthreedotsbelowarabic;06D1
+yekorean;3156
+yen;00A5
+yenmonospace;FFE5
+yeokorean;3155
+yeorinhieuhkorean;3186
+yerahbenyomohebrew;05AA
+yerahbenyomolefthebrew;05AA
+yericyrillic;044B
+yerudieresiscyrillic;04F9
+yesieungkorean;3181
+yesieungpansioskorean;3183
+yesieungsioskorean;3182
+yetivhebrew;059A
+ygrave;1EF3
+yhook;01B4
+yhookabove;1EF7
+yiarmenian;0575
+yicyrillic;0457
+yikorean;3162
+yinyang;262F
+yiwnarmenian;0582
+ymonospace;FF59
+yod;05D9
+yoddagesh;FB39
+yoddageshhebrew;FB39
+yodhebrew;05D9
+yodyodhebrew;05F2
+yodyodpatahhebrew;FB1F
+yohiragana;3088
+yoikorean;3189
+yokatakana;30E8
+yokatakanahalfwidth;FF96
+yokorean;315B
+yosmallhiragana;3087
+yosmallkatakana;30E7
+yosmallkatakanahalfwidth;FF6E
+yotgreek;03F3
+yoyaekorean;3188
+yoyakorean;3187
+yoyakthai;0E22
+yoyingthai;0E0D
+yparen;24B4
+ypogegrammeni;037A
+ypogegrammenigreekcmb;0345
+yr;01A6
+yring;1E99
+ysuperior;02B8
+ytilde;1EF9
+yturned;028E
+yuhiragana;3086
+yuikorean;318C
+yukatakana;30E6
+yukatakanahalfwidth;FF95
+yukorean;3160
+yusbigcyrillic;046B
+yusbigiotifiedcyrillic;046D
+yuslittlecyrillic;0467
+yuslittleiotifiedcyrillic;0469
+yusmallhiragana;3085
+yusmallkatakana;30E5
+yusmallkatakanahalfwidth;FF6D
+yuyekorean;318B
+yuyeokorean;318A
+yyabengali;09DF
+yyadeva;095F
+z;007A
+zaarmenian;0566
+zacute;017A
+zadeva;095B
+zagurmukhi;0A5B
+zaharabic;0638
+zahfinalarabic;FEC6
+zahinitialarabic;FEC7
+zahiragana;3056
+zahmedialarabic;FEC8
+zainarabic;0632
+zainfinalarabic;FEB0
+zakatakana;30B6
+zaqefgadolhebrew;0595
+zaqefqatanhebrew;0594
+zarqahebrew;0598
+zayin;05D6
+zayindagesh;FB36
+zayindageshhebrew;FB36
+zayinhebrew;05D6
+zbopomofo;3117
+zcaron;017E
+zcircle;24E9
+zcircumflex;1E91
+zcurl;0291
+zdot;017C
+zdotaccent;017C
+zdotbelow;1E93
+zecyrillic;0437
+zedescendercyrillic;0499
+zedieresiscyrillic;04DF
+zehiragana;305C
+zekatakana;30BC
+zero;0030
+zeroarabic;0660
+zerobengali;09E6
+zerodeva;0966
+zerogujarati;0AE6
+zerogurmukhi;0A66
+zerohackarabic;0660
+zeroinferior;2080
+zeromonospace;FF10
+zerooldstyle;F730
+zeropersian;06F0
+zerosuperior;2070
+zerothai;0E50
+zerowidthjoiner;FEFF
+zerowidthnonjoiner;200C
+zerowidthspace;200B
+zeta;03B6
+zhbopomofo;3113
+zhearmenian;056A
+zhebrevecyrillic;04C2
+zhecyrillic;0436
+zhedescendercyrillic;0497
+zhedieresiscyrillic;04DD
+zihiragana;3058
+zikatakana;30B8
+zinorhebrew;05AE
+zlinebelow;1E95
+zmonospace;FF5A
+zohiragana;305E
+zokatakana;30BE
+zparen;24B5
+zretroflexhook;0290
+zstroke;01B6
+zuhiragana;305A
+zukatakana;30BA
+# END
+"""
+
+
+_aglfnText = """\
+# -----------------------------------------------------------
+# Copyright 2002-2019 Adobe (http://www.adobe.com/).
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the
+# following conditions are met:
+#
+# Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+#
+# Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# Neither the name of Adobe nor the names of its contributors
+# may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------
+# Name: Adobe Glyph List For New Fonts
+# Table version: 1.7
+# Date: November 6, 2008
+# URL: https://github.com/adobe-type-tools/agl-aglfn
+#
+# Description:
+#
+# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph
+# names that are recommended for new fonts, which are compatible with
+# the AGL (Adobe Glyph List) Specification, and which should be used
+# as described in Section 6 of that document. AGLFN comprises the set
+# of glyph names from AGL that map via the AGL Specification rules to
+# the semantically correct UV (Unicode Value). For example, "Asmall"
+# is omitted because AGL maps this glyph name to the PUA (Private Use
+# Area) value U+F761, rather than to the UV that maps from the glyph
+# name "A." Also omitted is "ffi," because AGL maps this to the
+# Alphabetic Presentation Forms value U+FB03, rather than decomposing
+# it into the following sequence of three UVs: U+0066, U+0066, and
+# U+0069. The name "arrowvertex" has been omitted because this glyph
+# now has a real UV, and AGL is now incorrect in mapping it to the PUA
+# value U+F8E6. If you do not find an appropriate name for your glyph
+# in this list, then please refer to Section 6 of the AGL
+# Specification.
+#
+# Format: three semicolon-delimited fields:
+# (1) Standard UV or CUS UV--four uppercase hexadecimal digits
+# (2) Glyph name--upper/lowercase letters and digits
+# (3) Character names: Unicode character names for standard UVs, and
+# descriptive names for CUS UVs--uppercase letters, hyphen, and
+# space
+#
+# The records are sorted by glyph name in increasing ASCII order,
+# entries with the same glyph name are sorted in decreasing priority
+# order, the UVs and Unicode character names are provided for
+# convenience, lines starting with "#" are comments, and blank lines
+# should be ignored.
+#
+# Revision History:
+#
+# 1.7 [6 November 2008]
+# - Reverted to the original 1.4 and earlier mappings for Delta,
+# Omega, and mu.
+# - Removed mappings for "afii" names. These should now be assigned
+# "uni" names.
+# - Removed mappings for "commaaccent" names. These should now be
+# assigned "uni" names.
+#
+# 1.6 [30 January 2006]
+# - Completed work intended in 1.5.
+#
+# 1.5 [23 November 2005]
+# - Removed duplicated block at end of file.
+# - Changed mappings:
+# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA
+# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA
+# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU
+# - Corrected statement above about why "ffi" is omitted.
+#
+# 1.4 [24 September 2003]
+# - Changed version to 1.4, to avoid confusion with the AGL 1.3.
+# - Fixed spelling errors in the header.
+# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode
+# value in some fonts.
+#
+# 1.1 [17 April 2003]
+# - Renamed [Tt]cedilla back to [Tt]commaaccent.
+#
+# 1.0 [31 January 2003]
+# - Original version.
+# - Derived from the AGLv1.2 by:
+# removing the PUA area codes;
+# removing duplicate Unicode mappings; and
+# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla"
+#
+0041;A;LATIN CAPITAL LETTER A
+00C6;AE;LATIN CAPITAL LETTER AE
+01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
+00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
+0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
+00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
+00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
+0391;Alpha;GREEK CAPITAL LETTER ALPHA
+0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
+0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
+0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
+00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
+01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
+0042;B;LATIN CAPITAL LETTER B
+0392;Beta;GREEK CAPITAL LETTER BETA
+0043;C;LATIN CAPITAL LETTER C
+0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
+010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
+00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
+0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
+03A7;Chi;GREEK CAPITAL LETTER CHI
+0044;D;LATIN CAPITAL LETTER D
+010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
+0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
+2206;Delta;INCREMENT
+0045;E;LATIN CAPITAL LETTER E
+00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
+0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
+011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
+00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
+0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
+00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
+0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
+014A;Eng;LATIN CAPITAL LETTER ENG
+0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
+0395;Epsilon;GREEK CAPITAL LETTER EPSILON
+0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
+0397;Eta;GREEK CAPITAL LETTER ETA
+0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
+00D0;Eth;LATIN CAPITAL LETTER ETH
+20AC;Euro;EURO SIGN
+0046;F;LATIN CAPITAL LETTER F
+0047;G;LATIN CAPITAL LETTER G
+0393;Gamma;GREEK CAPITAL LETTER GAMMA
+011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
+01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
+011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
+0048;H;LATIN CAPITAL LETTER H
+25CF;H18533;BLACK CIRCLE
+25AA;H18543;BLACK SMALL SQUARE
+25AB;H18551;WHITE SMALL SQUARE
+25A1;H22073;WHITE SQUARE
+0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
+0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+0049;I;LATIN CAPITAL LETTER I
+0132;IJ;LATIN CAPITAL LIGATURE IJ
+00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
+012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
+00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
+0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
+2111;Ifraktur;BLACK-LETTER CAPITAL I
+00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
+012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
+012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
+0399;Iota;GREEK CAPITAL LETTER IOTA
+03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
+0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
+004A;J;LATIN CAPITAL LETTER J
+0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+004B;K;LATIN CAPITAL LETTER K
+039A;Kappa;GREEK CAPITAL LETTER KAPPA
+004C;L;LATIN CAPITAL LETTER L
+0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
+039B;Lambda;GREEK CAPITAL LETTER LAMDA
+013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
+013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
+004D;M;LATIN CAPITAL LETTER M
+039C;Mu;GREEK CAPITAL LETTER MU
+004E;N;LATIN CAPITAL LETTER N
+0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
+0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
+00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
+039D;Nu;GREEK CAPITAL LETTER NU
+004F;O;LATIN CAPITAL LETTER O
+0152;OE;LATIN CAPITAL LIGATURE OE
+00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
+014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
+00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
+00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
+01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
+0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
+2126;Omega;OHM SIGN
+038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
+039F;Omicron;GREEK CAPITAL LETTER OMICRON
+038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
+00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
+01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
+0050;P;LATIN CAPITAL LETTER P
+03A6;Phi;GREEK CAPITAL LETTER PHI
+03A0;Pi;GREEK CAPITAL LETTER PI
+03A8;Psi;GREEK CAPITAL LETTER PSI
+0051;Q;LATIN CAPITAL LETTER Q
+0052;R;LATIN CAPITAL LETTER R
+0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
+0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
+211C;Rfraktur;BLACK-LETTER CAPITAL R
+03A1;Rho;GREEK CAPITAL LETTER RHO
+0053;S;LATIN CAPITAL LETTER S
+250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
+2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
+2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
+2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
+253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
+251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
+2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
+2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
+2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
+2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
+255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
+255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE +255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE +255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT +2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT +2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL +2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL +2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT +2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL +256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL +2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE +2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE +2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE +2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE +2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE +2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE +2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE +2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE +256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE +256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE +015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE +0160;Scaron;LATIN CAPITAL LETTER S WITH CARON +015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA +015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX +03A3;Sigma;GREEK CAPITAL LETTER SIGMA +0054;T;LATIN CAPITAL LETTER T +03A4;Tau;GREEK CAPITAL LETTER TAU +0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE +0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON +0398;Theta;GREEK CAPITAL LETTER THETA +00DE;Thorn;LATIN CAPITAL LETTER THORN +0055;U;LATIN CAPITAL LETTER U +00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE +016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE +00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX +00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS +00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE +01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN +0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE +016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON +0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK +03A5;Upsilon;GREEK CAPITAL LETTER UPSILON +03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL +03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA +038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS +016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE +0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE +0056;V;LATIN CAPITAL LETTER V +0057;W;LATIN CAPITAL LETTER W +1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE +0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX +1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS +1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE +0058;X;LATIN CAPITAL LETTER X +039E;Xi;GREEK CAPITAL LETTER XI +0059;Y;LATIN CAPITAL LETTER Y +00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE +0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX +0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS +1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE +005A;Z;LATIN CAPITAL LETTER Z +0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE +017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON +017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE +0396;Zeta;GREEK CAPITAL LETTER ZETA +0061;a;LATIN SMALL LETTER A +00E1;aacute;LATIN SMALL LETTER A WITH ACUTE +0103;abreve;LATIN SMALL LETTER A WITH BREVE +00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX +00B4;acute;ACUTE ACCENT +0301;acutecomb;COMBINING ACUTE ACCENT +00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS +00E6;ae;LATIN SMALL LETTER AE +01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE +00E0;agrave;LATIN SMALL LETTER A WITH 
GRAVE +2135;aleph;ALEF SYMBOL +03B1;alpha;GREEK SMALL LETTER ALPHA +03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS +0101;amacron;LATIN SMALL LETTER A WITH MACRON +0026;ampersand;AMPERSAND +2220;angle;ANGLE +2329;angleleft;LEFT-POINTING ANGLE BRACKET +232A;angleright;RIGHT-POINTING ANGLE BRACKET +0387;anoteleia;GREEK ANO TELEIA +0105;aogonek;LATIN SMALL LETTER A WITH OGONEK +2248;approxequal;ALMOST EQUAL TO +00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE +01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE +2194;arrowboth;LEFT RIGHT ARROW +21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW +21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW +21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW +21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW +21D1;arrowdblup;UPWARDS DOUBLE ARROW +2193;arrowdown;DOWNWARDS ARROW +2190;arrowleft;LEFTWARDS ARROW +2192;arrowright;RIGHTWARDS ARROW +2191;arrowup;UPWARDS ARROW +2195;arrowupdn;UP DOWN ARROW +21A8;arrowupdnbse;UP DOWN ARROW WITH BASE +005E;asciicircum;CIRCUMFLEX ACCENT +007E;asciitilde;TILDE +002A;asterisk;ASTERISK +2217;asteriskmath;ASTERISK OPERATOR +0040;at;COMMERCIAL AT +00E3;atilde;LATIN SMALL LETTER A WITH TILDE +0062;b;LATIN SMALL LETTER B +005C;backslash;REVERSE SOLIDUS +007C;bar;VERTICAL LINE +03B2;beta;GREEK SMALL LETTER BETA +2588;block;FULL BLOCK +007B;braceleft;LEFT CURLY BRACKET +007D;braceright;RIGHT CURLY BRACKET +005B;bracketleft;LEFT SQUARE BRACKET +005D;bracketright;RIGHT SQUARE BRACKET +02D8;breve;BREVE +00A6;brokenbar;BROKEN BAR +2022;bullet;BULLET +0063;c;LATIN SMALL LETTER C +0107;cacute;LATIN SMALL LETTER C WITH ACUTE +02C7;caron;CARON +21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS +010D;ccaron;LATIN SMALL LETTER C WITH CARON +00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA +0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX +010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE +00B8;cedilla;CEDILLA +00A2;cent;CENT SIGN +03C7;chi;GREEK SMALL LETTER CHI +25CB;circle;WHITE CIRCLE +2297;circlemultiply;CIRCLED TIMES +2295;circleplus;CIRCLED PLUS +02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT +2663;club;BLACK CLUB SUIT +003A;colon;COLON +20A1;colonmonetary;COLON SIGN +002C;comma;COMMA +2245;congruent;APPROXIMATELY EQUAL TO +00A9;copyright;COPYRIGHT SIGN +00A4;currency;CURRENCY SIGN +0064;d;LATIN SMALL LETTER D +2020;dagger;DAGGER +2021;daggerdbl;DOUBLE DAGGER +010F;dcaron;LATIN SMALL LETTER D WITH CARON +0111;dcroat;LATIN SMALL LETTER D WITH STROKE +00B0;degree;DEGREE SIGN +03B4;delta;GREEK SMALL LETTER DELTA +2666;diamond;BLACK DIAMOND SUIT +00A8;dieresis;DIAERESIS +0385;dieresistonos;GREEK DIALYTIKA TONOS +00F7;divide;DIVISION SIGN +2593;dkshade;DARK SHADE +2584;dnblock;LOWER HALF BLOCK +0024;dollar;DOLLAR SIGN +20AB;dong;DONG SIGN +02D9;dotaccent;DOT ABOVE +0323;dotbelowcomb;COMBINING DOT BELOW +0131;dotlessi;LATIN SMALL LETTER DOTLESS I +22C5;dotmath;DOT OPERATOR +0065;e;LATIN SMALL LETTER E +00E9;eacute;LATIN SMALL LETTER E WITH ACUTE +0115;ebreve;LATIN SMALL LETTER E WITH BREVE +011B;ecaron;LATIN SMALL LETTER E WITH CARON +00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX +00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS +0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE +00E8;egrave;LATIN SMALL LETTER E WITH GRAVE +0038;eight;DIGIT EIGHT +2208;element;ELEMENT OF +2026;ellipsis;HORIZONTAL ELLIPSIS +0113;emacron;LATIN SMALL LETTER E WITH MACRON +2014;emdash;EM DASH +2205;emptyset;EMPTY SET +2013;endash;EN DASH +014B;eng;LATIN SMALL LETTER ENG +0119;eogonek;LATIN SMALL LETTER E WITH OGONEK +03B5;epsilon;GREEK SMALL 
LETTER EPSILON +03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS +003D;equal;EQUALS SIGN +2261;equivalence;IDENTICAL TO +212E;estimated;ESTIMATED SYMBOL +03B7;eta;GREEK SMALL LETTER ETA +03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS +00F0;eth;LATIN SMALL LETTER ETH +0021;exclam;EXCLAMATION MARK +203C;exclamdbl;DOUBLE EXCLAMATION MARK +00A1;exclamdown;INVERTED EXCLAMATION MARK +2203;existential;THERE EXISTS +0066;f;LATIN SMALL LETTER F +2640;female;FEMALE SIGN +2012;figuredash;FIGURE DASH +25A0;filledbox;BLACK SQUARE +25AC;filledrect;BLACK RECTANGLE +0035;five;DIGIT FIVE +215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS +0192;florin;LATIN SMALL LETTER F WITH HOOK +0034;four;DIGIT FOUR +2044;fraction;FRACTION SLASH +20A3;franc;FRENCH FRANC SIGN +0067;g;LATIN SMALL LETTER G +03B3;gamma;GREEK SMALL LETTER GAMMA +011F;gbreve;LATIN SMALL LETTER G WITH BREVE +01E7;gcaron;LATIN SMALL LETTER G WITH CARON +011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX +0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE +00DF;germandbls;LATIN SMALL LETTER SHARP S +2207;gradient;NABLA +0060;grave;GRAVE ACCENT +0300;gravecomb;COMBINING GRAVE ACCENT +003E;greater;GREATER-THAN SIGN +2265;greaterequal;GREATER-THAN OR EQUAL TO +00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK +00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK +2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK +203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK +0068;h;LATIN SMALL LETTER H +0127;hbar;LATIN SMALL LETTER H WITH STROKE +0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX +2665;heart;BLACK HEART SUIT +0309;hookabovecomb;COMBINING HOOK ABOVE +2302;house;HOUSE +02DD;hungarumlaut;DOUBLE ACUTE ACCENT +002D;hyphen;HYPHEN-MINUS +0069;i;LATIN SMALL LETTER I +00ED;iacute;LATIN SMALL LETTER I WITH ACUTE +012D;ibreve;LATIN SMALL LETTER I WITH BREVE +00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX +00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS +00EC;igrave;LATIN SMALL LETTER I WITH GRAVE +0133;ij;LATIN SMALL LIGATURE IJ +012B;imacron;LATIN SMALL LETTER I WITH MACRON +221E;infinity;INFINITY +222B;integral;INTEGRAL +2321;integralbt;BOTTOM HALF INTEGRAL +2320;integraltp;TOP HALF INTEGRAL +2229;intersection;INTERSECTION +25D8;invbullet;INVERSE BULLET +25D9;invcircle;INVERSE WHITE CIRCLE +263B;invsmileface;BLACK SMILING FACE +012F;iogonek;LATIN SMALL LETTER I WITH OGONEK +03B9;iota;GREEK SMALL LETTER IOTA +03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA +0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS +0129;itilde;LATIN SMALL LETTER I WITH TILDE +006A;j;LATIN SMALL LETTER J +0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX +006B;k;LATIN SMALL LETTER K +03BA;kappa;GREEK SMALL LETTER KAPPA +0138;kgreenlandic;LATIN SMALL LETTER KRA +006C;l;LATIN SMALL LETTER L +013A;lacute;LATIN SMALL LETTER L WITH ACUTE +03BB;lambda;GREEK SMALL LETTER LAMDA +013E;lcaron;LATIN SMALL LETTER L WITH CARON +0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT +003C;less;LESS-THAN SIGN +2264;lessequal;LESS-THAN OR EQUAL TO +258C;lfblock;LEFT HALF BLOCK +20A4;lira;LIRA SIGN +2227;logicaland;LOGICAL AND +00AC;logicalnot;NOT SIGN +2228;logicalor;LOGICAL OR +017F;longs;LATIN SMALL LETTER LONG S +25CA;lozenge;LOZENGE +0142;lslash;LATIN SMALL LETTER L WITH STROKE +2591;ltshade;LIGHT SHADE +006D;m;LATIN SMALL LETTER M +00AF;macron;MACRON +2642;male;MALE SIGN +2212;minus;MINUS SIGN +2032;minute;PRIME +00B5;mu;MICRO SIGN 
+00D7;multiply;MULTIPLICATION SIGN +266A;musicalnote;EIGHTH NOTE +266B;musicalnotedbl;BEAMED EIGHTH NOTES +006E;n;LATIN SMALL LETTER N +0144;nacute;LATIN SMALL LETTER N WITH ACUTE +0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0148;ncaron;LATIN SMALL LETTER N WITH CARON +0039;nine;DIGIT NINE +2209;notelement;NOT AN ELEMENT OF +2260;notequal;NOT EQUAL TO +2284;notsubset;NOT A SUBSET OF +00F1;ntilde;LATIN SMALL LETTER N WITH TILDE +03BD;nu;GREEK SMALL LETTER NU +0023;numbersign;NUMBER SIGN +006F;o;LATIN SMALL LETTER O +00F3;oacute;LATIN SMALL LETTER O WITH ACUTE +014F;obreve;LATIN SMALL LETTER O WITH BREVE +00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX +00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS +0153;oe;LATIN SMALL LIGATURE OE +02DB;ogonek;OGONEK +00F2;ograve;LATIN SMALL LETTER O WITH GRAVE +01A1;ohorn;LATIN SMALL LETTER O WITH HORN +0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE +014D;omacron;LATIN SMALL LETTER O WITH MACRON +03C9;omega;GREEK SMALL LETTER OMEGA +03D6;omega1;GREEK PI SYMBOL +03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS +03BF;omicron;GREEK SMALL LETTER OMICRON +03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS +0031;one;DIGIT ONE +2024;onedotenleader;ONE DOT LEADER +215B;oneeighth;VULGAR FRACTION ONE EIGHTH +00BD;onehalf;VULGAR FRACTION ONE HALF +00BC;onequarter;VULGAR FRACTION ONE QUARTER +2153;onethird;VULGAR FRACTION ONE THIRD +25E6;openbullet;WHITE BULLET +00AA;ordfeminine;FEMININE ORDINAL INDICATOR +00BA;ordmasculine;MASCULINE ORDINAL INDICATOR +221F;orthogonal;RIGHT ANGLE +00F8;oslash;LATIN SMALL LETTER O WITH STROKE +01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE +00F5;otilde;LATIN SMALL LETTER O WITH TILDE +0070;p;LATIN SMALL LETTER P +00B6;paragraph;PILCROW SIGN +0028;parenleft;LEFT PARENTHESIS +0029;parenright;RIGHT PARENTHESIS +2202;partialdiff;PARTIAL DIFFERENTIAL +0025;percent;PERCENT SIGN +002E;period;FULL STOP +00B7;periodcentered;MIDDLE DOT +22A5;perpendicular;UP TACK +2030;perthousand;PER MILLE SIGN +20A7;peseta;PESETA SIGN +03C6;phi;GREEK SMALL LETTER PHI +03D5;phi1;GREEK PHI SYMBOL +03C0;pi;GREEK SMALL LETTER PI +002B;plus;PLUS SIGN +00B1;plusminus;PLUS-MINUS SIGN +211E;prescription;PRESCRIPTION TAKE +220F;product;N-ARY PRODUCT +2282;propersubset;SUBSET OF +2283;propersuperset;SUPERSET OF +221D;proportional;PROPORTIONAL TO +03C8;psi;GREEK SMALL LETTER PSI +0071;q;LATIN SMALL LETTER Q +003F;question;QUESTION MARK +00BF;questiondown;INVERTED QUESTION MARK +0022;quotedbl;QUOTATION MARK +201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK +201C;quotedblleft;LEFT DOUBLE QUOTATION MARK +201D;quotedblright;RIGHT DOUBLE QUOTATION MARK +2018;quoteleft;LEFT SINGLE QUOTATION MARK +201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK +2019;quoteright;RIGHT SINGLE QUOTATION MARK +201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK +0027;quotesingle;APOSTROPHE +0072;r;LATIN SMALL LETTER R +0155;racute;LATIN SMALL LETTER R WITH ACUTE +221A;radical;SQUARE ROOT +0159;rcaron;LATIN SMALL LETTER R WITH CARON +2286;reflexsubset;SUBSET OF OR EQUAL TO +2287;reflexsuperset;SUPERSET OF OR EQUAL TO +00AE;registered;REGISTERED SIGN +2310;revlogicalnot;REVERSED NOT SIGN +03C1;rho;GREEK SMALL LETTER RHO +02DA;ring;RING ABOVE +2590;rtblock;RIGHT HALF BLOCK +0073;s;LATIN SMALL LETTER S +015B;sacute;LATIN SMALL LETTER S WITH ACUTE +0161;scaron;LATIN SMALL LETTER S WITH CARON +015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA +015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX +2033;second;DOUBLE PRIME +00A7;section;SECTION 
SIGN +003B;semicolon;SEMICOLON +0037;seven;DIGIT SEVEN +215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS +2592;shade;MEDIUM SHADE +03C3;sigma;GREEK SMALL LETTER SIGMA +03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA +223C;similar;TILDE OPERATOR +0036;six;DIGIT SIX +002F;slash;SOLIDUS +263A;smileface;WHITE SMILING FACE +0020;space;SPACE +2660;spade;BLACK SPADE SUIT +00A3;sterling;POUND SIGN +220B;suchthat;CONTAINS AS MEMBER +2211;summation;N-ARY SUMMATION +263C;sun;WHITE SUN WITH RAYS +0074;t;LATIN SMALL LETTER T +03C4;tau;GREEK SMALL LETTER TAU +0167;tbar;LATIN SMALL LETTER T WITH STROKE +0165;tcaron;LATIN SMALL LETTER T WITH CARON +2234;therefore;THEREFORE +03B8;theta;GREEK SMALL LETTER THETA +03D1;theta1;GREEK THETA SYMBOL +00FE;thorn;LATIN SMALL LETTER THORN +0033;three;DIGIT THREE +215C;threeeighths;VULGAR FRACTION THREE EIGHTHS +00BE;threequarters;VULGAR FRACTION THREE QUARTERS +02DC;tilde;SMALL TILDE +0303;tildecomb;COMBINING TILDE +0384;tonos;GREEK TONOS +2122;trademark;TRADE MARK SIGN +25BC;triagdn;BLACK DOWN-POINTING TRIANGLE +25C4;triaglf;BLACK LEFT-POINTING POINTER +25BA;triagrt;BLACK RIGHT-POINTING POINTER +25B2;triagup;BLACK UP-POINTING TRIANGLE +0032;two;DIGIT TWO +2025;twodotenleader;TWO DOT LEADER +2154;twothirds;VULGAR FRACTION TWO THIRDS +0075;u;LATIN SMALL LETTER U +00FA;uacute;LATIN SMALL LETTER U WITH ACUTE +016D;ubreve;LATIN SMALL LETTER U WITH BREVE +00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX +00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS +00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE +01B0;uhorn;LATIN SMALL LETTER U WITH HORN +0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE +016B;umacron;LATIN SMALL LETTER U WITH MACRON +005F;underscore;LOW LINE +2017;underscoredbl;DOUBLE LOW LINE +222A;union;UNION +2200;universal;FOR ALL +0173;uogonek;LATIN SMALL LETTER U WITH OGONEK +2580;upblock;UPPER HALF BLOCK +03C5;upsilon;GREEK SMALL LETTER UPSILON +03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA +03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS +016F;uring;LATIN SMALL LETTER U WITH RING ABOVE +0169;utilde;LATIN SMALL LETTER U WITH TILDE +0076;v;LATIN SMALL LETTER V +0077;w;LATIN SMALL LETTER W +1E83;wacute;LATIN SMALL LETTER W WITH ACUTE +0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX +1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS +2118;weierstrass;SCRIPT CAPITAL P +1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE +0078;x;LATIN SMALL LETTER X +03BE;xi;GREEK SMALL LETTER XI +0079;y;LATIN SMALL LETTER Y +00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE +0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX +00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS +00A5;yen;YEN SIGN +1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE +007A;z;LATIN SMALL LETTER Z +017A;zacute;LATIN SMALL LETTER Z WITH ACUTE +017E;zcaron;LATIN SMALL LETTER Z WITH CARON +017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE +0030;zero;DIGIT ZERO +03B6;zeta;GREEK SMALL LETTER ZETA +# END +""" + + +class AGLError(Exception): + pass + + +LEGACY_AGL2UV = {} +AGL2UV = {} +UV2AGL = {} + + +def _builddicts(): + import re + + lines = _aglText.splitlines() + + parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$") + + for line in lines: + if not line or line[:1] == "#": + continue + m = parseAGL_RE.match(line) + if not m: + raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) + unicodes = m.group(2) + assert len(unicodes) % 5 == 4 + unicodes = [int(unicode, 
16) for unicode in unicodes.split()] + glyphName = tostr(m.group(1)) + LEGACY_AGL2UV[glyphName] = unicodes + + lines = _aglfnText.splitlines() + + parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$") + + for line in lines: + if not line or line[:1] == "#": + continue + m = parseAGLFN_RE.match(line) + if not m: + raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20])) + unicode = m.group(1) + assert len(unicode) == 4 + unicode = int(unicode, 16) + glyphName = tostr(m.group(2)) + AGL2UV[glyphName] = unicode + UV2AGL[unicode] = glyphName + + +_builddicts() + + +def toUnicode(glyph, isZapfDingbats=False): + """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'`` + + If ``isZapfDingbats`` is ``True``, the implementation recognizes additional + glyph names (as required by the AGL specification). + """ + # https://github.com/adobe-type-tools/agl-specification#2-the-mapping + # + # 1. Drop all the characters from the glyph name starting with + # the first occurrence of a period (U+002E; FULL STOP), if any. + glyph = glyph.split(".", 1)[0] + + # 2. Split the remaining string into a sequence of components, + # using underscore (U+005F; LOW LINE) as the delimiter. + components = glyph.split("_") + + # 3. Map each component to a character string according to the + # procedure below, and concatenate those strings; the result + # is the character string to which the glyph name is mapped. + result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components] + return "".join(result) + + +def _glyphComponentToUnicode(component, isZapfDingbats): + # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats), + # and the component is in the ITC Zapf Dingbats Glyph List, then + # map it to the corresponding character in that list. + dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None + if dingbat: + return dingbat + + # Otherwise, if the component is in AGL, then map it + # to the corresponding character in that list. + uchars = LEGACY_AGL2UV.get(component) + if uchars: + return "".join(map(chr, uchars)) + + # Otherwise, if the component is of the form "uni" (U+0075, + # U+006E, and U+0069) followed by a sequence of uppercase + # hexadecimal digits (0–9 and A–F, meaning U+0030 through + # U+0039 and U+0041 through U+0046), if the length of that + # sequence is a multiple of four, and if each group of four + # digits represents a value in the ranges 0000 through D7FF + # or E000 through FFFF, then interpret each as a Unicode scalar + # value and map the component to the string made of those + # scalar values. Note that the range and digit-length + # restrictions mean that the "uni" glyph name prefix can be + # used only with UVs in the Basic Multilingual Plane (BMP). + uni = _uniToUnicode(component) + if uni: + return uni + + # Otherwise, if the component is of the form "u" (U+0075) + # followed by a sequence of four to six uppercase hexadecimal + # digits (0–9 and A–F, meaning U+0030 through U+0039 and + # U+0041 through U+0046), and those digits represent a value + # in the ranges 0000 through D7FF or E000 through 10FFFF, then + # interpret it as a Unicode scalar value and map the component + # to the string made of this scalar value. + uni = _uToUnicode(component) + if uni: + return uni + + # Otherwise, map the component to an empty string.
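+    # (Illustrative examples of the rules above: toUnicode("uni0066_uni0066_uni0069")
+    # yields "ffi" via the "uni" rule, toUnicode("u1F4A9") yields "\U0001F4A9" via
+    # the "u" rule, and an unrecognized component such as "foo" contributes
+    # nothing to the result.)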
+ return "" + + +# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt +_AGL_ZAPF_DINGBATS = ( + " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀" + "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇" + "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔" + "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰" +) + + +def _zapfDingbatsToUnicode(glyph): + """Helper for toUnicode().""" + if len(glyph) < 2 or glyph[0] != "a": + return None + try: + gid = int(glyph[1:]) + except ValueError: + return None + if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS): + return None + uchar = _AGL_ZAPF_DINGBATS[gid] + return uchar if uchar != " " else None + + +_re_uni = re.compile("^uni([0-9A-F]+)$") + + +def _uniToUnicode(component): + """Helper for toUnicode() to handle "uniABCD" components.""" + match = _re_uni.match(component) + if match is None: + return None + digits = match.group(1) + if len(digits) % 4 != 0: + return None + chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)] + if any(c >= 0xD800 and c <= 0xDFFF for c in chars): + # The AGL specification explicitly excluded surrogate pairs. + return None + return "".join([chr(c) for c in chars]) + + +_re_u = re.compile("^u([0-9A-F]{4,6})$") + + +def _uToUnicode(component): + """Helper for toUnicode() to handle "u1ABCD" components.""" + match = _re_u.match(component) + if match is None: + return None + digits = match.group(1) + try: + value = int(digits, 16) + except ValueError: + return None + if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF): + return chr(value) + return None diff --git a/py311/lib/python3.11/site-packages/fontTools/annotations.py b/py311/lib/python3.11/site-packages/fontTools/annotations.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff5972e36cae66d7aaa80d54a5c69ca608b348d --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/annotations.py @@ -0,0 +1,30 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Iterable, Optional, TypeVar, Union +from collections.abc import Callable, Sequence +from fontTools.misc.filesystem._base import FS +from os import PathLike +from xml.etree.ElementTree import Element as ElementTreeElement + +if TYPE_CHECKING: + from fontTools.ufoLib import UFOFormatVersion + from fontTools.ufoLib.glifLib import GLIFFormatVersion + from lxml.etree import _Element as LxmlElement + + +T = TypeVar("T") # Generic type +K = TypeVar("K") # Generic dict key type +V = TypeVar("V") # Generic dict value type + +GlyphNameToFileNameFunc = Optional[Callable[[str, set[str]], str]] +ElementType = Union[ElementTreeElement, "LxmlElement"] +FormatVersion = Union[int, tuple[int, int]] +FormatVersions = Optional[Iterable[FormatVersion]] +GLIFFormatVersionInput = Optional[Union[int, tuple[int, int], "GLIFFormatVersion"]] +UFOFormatVersionInput = Optional[Union[int, tuple[int, int], "UFOFormatVersion"]] +IntFloat = Union[int, float] +KerningPair = tuple[str, str] +KerningDict = dict[KerningPair, IntFloat] +KerningGroups = dict[str, Sequence[str]] +KerningNested = dict[str, dict[str, IntFloat]] +PathStr = Union[str, PathLike[str]] +PathOrFS = Union[PathStr, FS] diff --git a/py311/lib/python3.11/site-packages/fontTools/cffLib/CFF2ToCFF.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/CFF2ToCFF.py new file mode 100644 index 0000000000000000000000000000000000000000..f33e48cc4c0e003ed907498b5d79d3eade23ccf5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/CFF2ToCFF.py @@ -0,0 
+1,258 @@ +"""CFF2 to CFF converter.""" + +from fontTools.ttLib import TTFont, newTable +from fontTools.misc.cliTools import makeOutputFileName +from fontTools.misc.psCharStrings import T2StackUseExtractor +from fontTools.cffLib import ( + TopDictIndex, + buildOrder, + buildDefaults, + topDictOperators, + privateDictOperators, + FDSelect, +) +from .transforms import desubroutinizeCharString +from .specializer import specializeProgram +from .width import optimizeWidths +from collections import defaultdict +import logging + + +__all__ = ["convertCFF2ToCFF", "main"] + + +log = logging.getLogger("fontTools.cffLib") + + +def _convertCFF2ToCFF(cff, otFont): + """Converts this object from CFF2 format to CFF format. This conversion + is done 'in-place'. The conversion cannot be reversed. + + The CFF2 font cannot be variable. (TODO Accept those and convert to the + default instance?) + + This assumes a decompiled CFF2 table. (i.e. that the object has been + filled via :meth:`decompile` and e.g. not loaded from XML.)""" + + cff.major = 1 + + topDictData = TopDictIndex(None) + for item in cff.topDictIndex: + # Iterate over, such that all are decompiled + item.cff2GetGlyphOrder = None + topDictData.append(item) + cff.topDictIndex = topDictData + topDict = topDictData[0] + + if hasattr(topDict, "VarStore"): + raise ValueError("Variable CFF2 font cannot be converted to CFF format.") + + opOrder = buildOrder(topDictOperators) + topDict.order = opOrder + for key in list(topDict.rawDict.keys()): + if key not in opOrder: + del topDict.rawDict[key] + if hasattr(topDict, key): + delattr(topDict, key) + + charStrings = topDict.CharStrings + + fdArray = topDict.FDArray + if not hasattr(topDict, "FDSelect"): + # FDSelect is optional in CFF2, but required in CFF. + fdSelect = topDict.FDSelect = FDSelect() + fdSelect.gidArray = [0] * len(charStrings.charStrings) + + defaults = buildDefaults(privateDictOperators) + order = buildOrder(privateDictOperators) + for fd in fdArray: + fd.setCFF2(False) + privateDict = fd.Private + privateDict.order = order + for key in order: + if key not in privateDict.rawDict and key in defaults: + privateDict.rawDict[key] = defaults[key] + for key in list(privateDict.rawDict.keys()): + if key not in order: + del privateDict.rawDict[key] + if hasattr(privateDict, key): + delattr(privateDict, key) + + # Add ending operators + for cs in charStrings.values(): + cs.decompile() + cs.program.append("endchar") + for subrSets in [cff.GlobalSubrs] + [ + getattr(fd.Private, "Subrs", []) for fd in fdArray + ]: + for cs in subrSets: + cs.program.append("return") + + # Add (optimal) width to CharStrings that need it. + widths = defaultdict(list) + metrics = otFont["hmtx"].metrics + for glyphName in charStrings.keys(): + cs, fdIndex = charStrings.getItemAndSelector(glyphName) + if fdIndex is None: + fdIndex = 0 + widths[fdIndex].append(metrics[glyphName][0]) + for fdIndex, widthList in widths.items(): + bestDefault, bestNominal = optimizeWidths(widthList) + private = fdArray[fdIndex].Private + private.defaultWidthX = bestDefault + private.nominalWidthX = bestNominal + for glyphName in charStrings.keys(): + cs, fdIndex = charStrings.getItemAndSelector(glyphName) + if fdIndex is None: + fdIndex = 0 + private = fdArray[fdIndex].Private + width = metrics[glyphName][0] + if width != private.defaultWidthX: + cs.program.insert(0, width - private.nominalWidthX) + + # Handle stack use since stack-depth is lower in CFF than in CFF2.
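+    # (CFF 1.0 limits the argument stack to 48 entries, whereas CFF2 allows
+    # 513; any charstring whose measured stack use exceeds the CFF limit is
+    # flattened below by inlining its subroutine calls and re-specializing
+    # the program.)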
+ for glyphName in charStrings.keys(): + cs, fdIndex = charStrings.getItemAndSelector(glyphName) + if fdIndex is None: + fdIndex = 0 + private = fdArray[fdIndex].Private + extractor = T2StackUseExtractor( + getattr(private, "Subrs", []), cff.GlobalSubrs, private=private + ) + stackUse = extractor.execute(cs) + if stackUse > 48: # CFF stack depth is 48 + desubroutinizeCharString(cs) + cs.program = specializeProgram(cs.program) + + # Unused subroutines are still in CFF2 (i.e. lacking 'return' operator) + # because they were not decompiled when we added the 'return'. + # Moreover, some used subroutines may have become unused after the + # stack-use fixup. So we remove all unused subroutines now. + cff.remove_unused_subroutines() + + mapping = { + name: ("cid" + str(n).zfill(5) if n else ".notdef") + for n, name in enumerate(topDict.charset) + } + topDict.charset = [ + "cid" + str(n).zfill(5) if n else ".notdef" for n in range(len(topDict.charset)) + ] + charStrings.charStrings = { + mapping[name]: v for name, v in charStrings.charStrings.items() + } + + topDict.ROS = ("Adobe", "Identity", 0) + + +def convertCFF2ToCFF(font, *, updatePostTable=True): + if "CFF2" not in font: + raise ValueError("Input font does not contain a CFF2 table.") + cff = font["CFF2"].cff + _convertCFF2ToCFF(cff, font) + del font["CFF2"] + table = font["CFF "] = newTable("CFF ") + table.cff = cff + + if updatePostTable and "post" in font: + # Only 'post' version supported for fonts with a CFF table is 0x00030000, not 0x00020000 + post = font["post"] + if post.formatType == 2.0: + post.formatType = 3.0 + + +def main(args=None): + """Convert CFF2 OTF font to CFF OTF font""" + if args is None: + import sys + + args = sys.argv[1:] + + import argparse + + parser = argparse.ArgumentParser( + "fonttools cffLib.CFF2ToCFF", + description="Convert a non-variable CFF2 font to CFF.", + ) + parser.add_argument( + "input", metavar="INPUT.ttf", help="Input OTF file with CFF table." + ) + parser.add_argument( + "-o", + "--output", + metavar="OUTPUT.ttf", + default=None, + help="Output OTF file (default: INPUT-CFF.ttf).", + ) + parser.add_argument( + "--no-recalc-timestamp", + dest="recalc_timestamp", + action="store_false", + help="Don't set the output font's timestamp to the current time.", + ) + parser.add_argument( + "--remove-overlaps", + action="store_true", + help="Merge overlapping contours and components. Requires skia-pathops.", + ) + parser.add_argument( + "--ignore-overlap-errors", + action="store_true", + help="Don't crash if the remove-overlaps operation fails for some glyphs.", + ) + loggingGroup = parser.add_mutually_exclusive_group(required=False) + loggingGroup.add_argument( + "-v", "--verbose", action="store_true", help="Run more verbosely." + ) + loggingGroup.add_argument( + "-q", "--quiet", action="store_true", help="Turn verbosity off."
+ ) + options = parser.parse_args(args) + + from fontTools import configLogger + + configLogger( + level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") + ) + + import os + + infile = options.input + if not os.path.isfile(infile): + parser.error("No such file '{}'".format(infile)) + + outfile = ( + makeOutputFileName(infile, overWrite=True, suffix="-CFF") + if not options.output + else options.output + ) + + font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False) + + convertCFF2ToCFF(font) + + if options.remove_overlaps: + from fontTools.ttLib.removeOverlaps import removeOverlaps + from io import BytesIO + + log.debug("Removing overlaps") + + stream = BytesIO() + font.save(stream) + stream.seek(0) + font = TTFont(stream, recalcTimestamp=False, recalcBBoxes=False) + removeOverlaps( + font, + ignoreErrors=options.ignore_overlap_errors, + ) + + log.info( + "Saving %s", + outfile, + ) + font.save(outfile) + + +if __name__ == "__main__": + import sys + + sys.exit(main(sys.argv[1:])) diff --git a/py311/lib/python3.11/site-packages/fontTools/cffLib/CFFToCFF2.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/CFFToCFF2.py new file mode 100644 index 0000000000000000000000000000000000000000..2555f0b242591cde7738f46932fd1cbe2d0a6ccf --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/CFFToCFF2.py @@ -0,0 +1,305 @@ +"""CFF to CFF2 converter.""" + +from fontTools.ttLib import TTFont, newTable +from fontTools.misc.cliTools import makeOutputFileName +from fontTools.misc.psCharStrings import T2WidthExtractor +from fontTools.cffLib import ( + TopDictIndex, + FDArrayIndex, + FontDict, + buildOrder, + topDictOperators, + privateDictOperators, + topDictOperators2, + privateDictOperators2, +) +from io import BytesIO +import logging + +__all__ = ["convertCFFToCFF2", "main"] + + +log = logging.getLogger("fontTools.cffLib") + + +class _NominalWidthUsedError(Exception): + def __add__(self, other): + raise self + + def __radd__(self, other): + raise self + + +def _convertCFFToCFF2(cff, otFont): + """Converts this object from CFF format to CFF2 format. This conversion + is done 'in-place'. The conversion cannot be reversed. + + This assumes a decompiled CFF table. (i.e. that the object has been + filled via :meth:`decompile` and e.g. 
not loaded from XML.)""" + + # Clean up T2CharStrings + + topDict = cff.topDictIndex[0] + fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None + charStrings = topDict.CharStrings + globalSubrs = cff.GlobalSubrs + localSubrs = ( + [getattr(fd.Private, "Subrs", []) for fd in fdArray] + if fdArray + else ( + [topDict.Private.Subrs] + if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs") + else [] + ) + ) + + for glyphName in charStrings.keys(): + cs, fdIndex = charStrings.getItemAndSelector(glyphName) + cs.decompile() + + # Clean up subroutines first + for subrs in [globalSubrs] + localSubrs: + for subr in subrs: + program = subr.program + i = j = len(program) + try: + i = program.index("return") + except ValueError: + pass + try: + j = program.index("endchar") + except ValueError: + pass + program[min(i, j) :] = [] + + # Clean up glyph charstrings + removeUnusedSubrs = False + nominalWidthXError = _NominalWidthUsedError() + for glyphName in charStrings.keys(): + cs, fdIndex = charStrings.getItemAndSelector(glyphName) + program = cs.program + + thisLocalSubrs = ( + localSubrs[fdIndex] + if fdIndex is not None + else ( + getattr(topDict.Private, "Subrs", []) + if hasattr(topDict, "Private") + else [] + ) + ) + + # Intentionally use custom type for nominalWidthX, such that any + # CharString that has an explicit width encoded will throw back to us. + extractor = T2WidthExtractor( + thisLocalSubrs, + globalSubrs, + nominalWidthXError, + 0, + ) + try: + extractor.execute(cs) + except _NominalWidthUsedError: + # Program has explicit width. We want to drop it, but can't + # just pop the first number since it may be a subroutine call. + # Instead, when seeing that, we embed the subroutine and recurse. + # If this ever happened, we later prune unused subroutines. 
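+            # (Subroutine operands are stored biased: the bias is 107 for
+            # fewer than 1240 subroutines, 1131 for fewer than 33900, and
+            # 32768 otherwise, so e.g. an operand of -107 under the smallest
+            # bias refers to subroutine number 0.)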
+ while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]: + removeUnusedSubrs = True + subrNumber = program.pop(0) + assert isinstance(subrNumber, int), subrNumber + op = program.pop(0) + bias = extractor.localBias if op == "callsubr" else extractor.globalBias + subrNumber += bias + subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs + subrProgram = subrSet[subrNumber].program + program[:0] = subrProgram + # Now pop the actual width + assert len(program) >= 1, program + program.pop(0) + + if program and program[-1] == "endchar": + program.pop() + + if removeUnusedSubrs: + cff.remove_unused_subroutines() + + # Upconvert TopDict + + cff.major = 2 + cff2GetGlyphOrder = cff.otFont.getGlyphOrder + topDictData = TopDictIndex(None, cff2GetGlyphOrder) + for item in cff.topDictIndex: + # Iterate over, such that all are decompiled + topDictData.append(item) + cff.topDictIndex = topDictData + topDict = topDictData[0] + if hasattr(topDict, "Private"): + privateDict = topDict.Private + else: + privateDict = None + opOrder = buildOrder(topDictOperators2) + topDict.order = opOrder + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + + if not hasattr(topDict, "FDArray"): + fdArray = topDict.FDArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = topDict.GlobalSubrs + topDict.GlobalSubrs.fdArray = fdArray + charStrings = topDict.CharStrings + if charStrings.charStringsAreIndexed: + charStrings.charStringsIndex.fdArray = fdArray + else: + charStrings.fdArray = fdArray + fontDict = FontDict() + fontDict.setCFF2(True) + fdArray.append(fontDict) + fontDict.Private = privateDict + privateOpOrder = buildOrder(privateDictOperators2) + if privateDict is not None: + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + delattr(privateDict, key) + # print "Removing privateDict attr", key + else: + # clean up the PrivateDicts in the fdArray + fdArray = topDict.FDArray + privateOpOrder = buildOrder(privateDictOperators2) + for fontDict in fdArray: + fontDict.setCFF2(True) + for key in list(fontDict.rawDict.keys()): + if key not in fontDict.order: + del fontDict.rawDict[key] + if hasattr(fontDict, key): + delattr(fontDict, key) + + privateDict = fontDict.Private + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in list(privateDict.rawDict.keys()): + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + delattr(privateDict, key) + # print "Removing privateDict attr", key + + # Now delete the deprecated topDict operators from CFF 1.0 + for entry in topDictOperators: + key = entry[1] + # We seem to need to keep the charset operator for now, + # or we fail to compile with some fonts, like AdditionFont.otf. + # I don't know which kind of CFF font those are. But keeping + # charset seems to work. It will be removed when we save and + # read the font again. + if key == "charset": + continue + if key not in opOrder: + if key in topDict.rawDict: + del topDict.rawDict[key] + if hasattr(topDict, key): + delattr(topDict, key) + + # TODO(behdad): What does the following comment even mean? Both CFF and CFF2 + # use the same T2Charstring class. I *think* what it means is that the CharStrings + # were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc + # on them.
At least that's what I understand. It's probably safe to remove this + # and just set vstore where needed. + # + # See comment above about charset as well. + + # At this point, the Subrs and Charstrings are all still T2Charstring class + # easiest to fix this by compiling, then decompiling again + file = BytesIO() + cff.compile(file, otFont, isCFF2=True) + file.seek(0) + cff.decompile(file, otFont, isCFF2=True) + + +def convertCFFToCFF2(font): + cff = font["CFF "].cff + del font["CFF "] + _convertCFFToCFF2(cff, font) + table = font["CFF2"] = newTable("CFF2") + table.cff = cff + + +def main(args=None): + """Convert CFF OTF font to CFF2 OTF font""" + if args is None: + import sys + + args = sys.argv[1:] + + import argparse + + parser = argparse.ArgumentParser( + "fonttools cffLib.CFFToCFF2", + description="Upgrade a CFF font to CFF2.", + ) + parser.add_argument( + "input", metavar="INPUT.ttf", help="Input OTF file with CFF table." + ) + parser.add_argument( + "-o", + "--output", + metavar="OUTPUT.ttf", + default=None, + help="Output OTF file (default: INPUT-CFF2.ttf).", + ) + parser.add_argument( + "--no-recalc-timestamp", + dest="recalc_timestamp", + action="store_false", + help="Don't set the output font's timestamp to the current time.", + ) + loggingGroup = parser.add_mutually_exclusive_group(required=False) + loggingGroup.add_argument( + "-v", "--verbose", action="store_true", help="Run more verbosely." + ) + loggingGroup.add_argument( + "-q", "--quiet", action="store_true", help="Turn verbosity off." + ) + options = parser.parse_args(args) + + from fontTools import configLogger + + configLogger( + level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") + ) + + import os + + infile = options.input + if not os.path.isfile(infile): + parser.error("No such file '{}'".format(infile)) + + outfile = ( + makeOutputFileName(infile, overWrite=True, suffix="-CFF2") + if not options.output + else options.output + ) + + font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False) + + convertCFFToCFF2(font) + + log.info( + "Saving %s", + outfile, + ) + font.save(outfile) + + +if __name__ == "__main__": + import sys + + sys.exit(main(sys.argv[1:])) diff --git a/py311/lib/python3.11/site-packages/fontTools/cffLib/__init__.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad724a27a812839d6c5e58314a5ff2f583d978c --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/__init__.py @@ -0,0 +1,3694 @@ +"""cffLib: read/write Adobe CFF fonts + +OpenType fonts with PostScript outlines embed a completely independent +font file in Adobe's *Compact Font Format*. So dealing with OpenType fonts +requires also dealing with CFF. This module allows you to read and write +fonts written in the CFF format. + +In 2016, OpenType 1.8 introduced the CFF2 +format which, along with other changes, extended the CFF format to deal with +the demands of variable fonts. This module parses both original CFF and CFF2.
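+
+A minimal usage sketch (``MyFont.otf`` is a placeholder path, not a file
+shipped with fontTools)::
+
+    from fontTools.ttLib import TTFont
+
+    font = TTFont("MyFont.otf")
+    cff = font["CFF "].cff  # the CFFFontSet parsed from the 'CFF ' table
+    print(cff.fontNames)    # name(s) of the font(s) embedded in the table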
+ +""" + +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.arrayTools import unionRect, intRect +from fontTools.misc.textTools import ( + bytechr, + byteord, + bytesjoin, + tobytes, + tostr, + safeEval, +) +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables.otBase import OTTableWriter +from fontTools.ttLib.tables.otBase import OTTableReader +from fontTools.ttLib.tables import otTables as ot +from io import BytesIO +import struct +import logging +import re + +# mute cffLib debug messages when running ttx in verbose mode +DEBUG = logging.DEBUG - 1 +log = logging.getLogger(__name__) + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B +""" + +maxStackLimit = 513 +# maxstack operator has been deprecated. max stack is now always 513. + + +class CFFFontSet(object): + """A CFF font "file" can contain more than one font, although this is + extremely rare (and not allowed within OpenType fonts). + + This class is the entry point for parsing a CFF table. To actually + manipulate the data inside the CFF font, you will want to access the + ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet`` + object can either be treated as a dictionary (with appropriate + ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict` + objects, or as a list. + + .. code:: python + + from fontTools import ttLib + tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf") + tt["CFF "].cff + # + tt["CFF "].cff[0] # Here's your actual font data + # + + """ + + def decompile(self, file, otFont, isCFF2=None): + """Parse a binary CFF file into an internal representation. ``file`` + should be a file handle object. ``otFont`` is the top-level + :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. + + If ``isCFF2`` is passed and set to ``True`` or ``False``, then the + library makes an assertion that the CFF header is of the appropriate + version. + """ + + self.otFont = otFont + sstruct.unpack(cffHeaderFormat, file.read(3), self) + if isCFF2 is not None: + # called from ttLib: assert 'major' as read from file matches the + # expected version + expected_major = 2 if isCFF2 else 1 + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" + % (expected_major, self.major) + ) + else: + # use 'major' version from file to determine if isCFF2 + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + if not isCFF2: + self.offSize = struct.unpack("B", file.read(1))[0] + file.seek(self.hdrSize) + self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2)) + self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2) + self.strings = IndexedStrings(file) + else: # isCFF2 + self.topDictSize = struct.unpack(">H", file.read(2))[0] + file.seek(self.hdrSize) + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = otFont.getGlyphOrder + # in CFF2, offsetSize is the size of the TopDict data. 
+ self.topDictIndex = TopDictIndex( + file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2 + ) + self.strings = None + self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, nameOrIndex): + """Return TopDict instance identified by name (str) or index (int + or any object that implements `__index__`). + """ + if hasattr(nameOrIndex, "__index__"): + index = nameOrIndex.__index__() + elif isinstance(nameOrIndex, str): + name = nameOrIndex + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(nameOrIndex) + else: + raise TypeError(nameOrIndex) + return self.topDictIndex[index] + + def compile(self, file, otFont, isCFF2=None): + """Write the object back into binary representation onto the given file. + ``file`` should be a file handle object. ``otFont`` is the top-level + :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. + + If ``isCFF2`` is passed and set to ``True`` or ``False``, then the + library makes an assertion that the CFF header is of the appropriate + version. + """ + self.otFont = otFont + if isCFF2 is not None: + # called from ttLib: assert 'major' value matches expected version + expected_major = 2 if isCFF2 else 1 + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" + % (expected_major, self.major) + ) + else: + # use current 'major' value to determine output format + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + + if otFont.recalcBBoxes and not isCFF2: + for topDict in self.topDictIndex: + topDict.recalcFontBBox() + + if not isCFF2: + strings = IndexedStrings() + else: + strings = None + writer = CFFWriter(isCFF2) + topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2) + if isCFF2: + self.hdrSize = 5 + writer.add(sstruct.pack(cffHeaderFormat, self)) + # Note: topDictSize will most likely change in CFFWriter.toFile(). + self.topDictSize = topCompiler.getDataLength() + writer.add(struct.pack(">H", self.topDictSize)) + else: + self.hdrSize = 4 + self.offSize = 4 # will most likely change in CFFWriter.toFile(). + writer.add(sstruct.pack(cffHeaderFormat, self)) + writer.add(struct.pack("B", self.offSize)) + if not isCFF2: + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2)) + writer.add(topCompiler) + if not isCFF2: + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + children = topCompiler.getChildren(strings) + for child in children: + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter): + """Write the object into XML representation onto the given + :class:`fontTools.misc.xmlWriter.XMLWriter`. + + .. 
code:: python + + writer = xmlWriter.XMLWriter(sys.stdout) + tt["CFF "].cff.toXML(writer) + + """ + + xmlWriter.simpletag("major", value=self.major) + xmlWriter.newline() + xmlWriter.simpletag("minor", value=self.minor) + xmlWriter.newline() + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, otFont=None): + """Reads data from the XML element into the ``CFFFontSet`` object.""" + self.otFont = otFont + + # set defaults. These will be replaced if there are entries for them + # in the XML file. + if not hasattr(self, "major"): + self.major = 1 + if not hasattr(self, "minor"): + self.minor = 0 + + if name == "CFFFont": + if self.major == 1: + if not hasattr(self, "offSize"): + # this will be recalculated when the cff is compiled. + self.offSize = 4 + if not hasattr(self, "hdrSize"): + self.hdrSize = 4 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + self.fontNames.append(fontName) + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + elif self.major == 2: + if not hasattr(self, "hdrSize"): + self.hdrSize = 5 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDict = TopDict( + GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder + ) + self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + + if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None: + fdArray = topDict.FDArray + for fontDict in fdArray: + if hasattr(fontDict, "Private"): + fontDict.Private.vstore = topDict.VarStore + + elif name == "GlobalSubrs": + subrCharStringClass = psCharStrings.T2CharString + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + subr = subrCharStringClass() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + elif name == "major": + self.major = int(attrs["value"]) + elif name == "minor": + self.minor = int(attrs["value"]) + + def convertCFFToCFF2(self, otFont): + from .CFFToCFF2 import _convertCFFToCFF2 + + _convertCFFToCFF2(self, otFont) + + def convertCFF2ToCFF(self, otFont): + from .CFF2ToCFF import _convertCFF2ToCFF + + _convertCFF2ToCFF(self, otFont) + + def desubroutinize(self): + from .transforms import desubroutinize + + desubroutinize(self) + + def remove_hints(self): + from .transforms import remove_hints + + remove_hints(self) + + def remove_unused_subroutines(self): + from .transforms import remove_unused_subroutines + + remove_unused_subroutines(self) + + +class CFFWriter(object): + """Helper class for serializing CFF data to binary. 
Used by + :meth:`CFFFontSet.compile`.""" + + def __init__(self, isCFF2): + self.data = [] + self.isCFF2 = isCFF2 + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + if isinstance(item, TopDictIndexCompiler) and item.isCFF2: + self.topDictSize = item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + log.log(DEBUG, "CFFWriter.toFile() writing to file.") + begin = file.tell() + if self.isCFF2: + self.data[1] = struct.pack(">H", self.topDictSize) + else: + self.offSize = calcOffSize(lastPosList[-1]) + self.data[1] = struct.pack("B", self.offSize) + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + """Base class for writing CFF INDEX data + to binary.""" + + def __init__(self, items, strings, parent, isCFF2=None): + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def getOffsets(self): + # An empty INDEX contains only the count field. + if self.items: + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + else: + offsets = [] + return offsets + + def getDataLength(self): + if self.isCFF2: + countSize = 4 + else: + countSize = 2 + + if self.items: + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + countSize # count + + 1 # offSize + + (len(self.items) + 1) * offSize # the offsets + + lastOffset # size of object data + - 1 + ) + else: + # count. For empty INDEX tables, this is the only entry. + dataLength = countSize + + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + # An empty INDEX contains only the count field.
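+        # (Offsets in an INDEX are relative to the byte preceding the object
+        # data, so the first offset is always 1, and offSize is chosen in
+        # calcOffSize() as the smallest byte width that can hold the largest
+        # offset.)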
+ if self.items: + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + data = tobytes(item, encoding="latin1") + file.write(data) + + +class IndexedStringsCompiler(IndexCompiler): + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + """Helper class for writing the TopDict to binary.""" + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + def getOffsets(self): + if self.isCFF2: + offsets = [0, self.items[0].getDataLength()] + return offsets + else: + return super(TopDictIndexCompiler, self).getOffsets() + + def getDataLength(self): + if self.isCFF2: + dataLength = self.items[0].getDataLength() + return dataLength + else: + return super(TopDictIndexCompiler, self).getDataLength() + + def toFile(self, file): + if self.isCFF2: + self.items[0].toFile(file) + else: + super(TopDictIndexCompiler, self).toFile(file) + + +class FDArrayIndexCompiler(IndexCompiler): + """Helper class for writing the + `Font DICT INDEX `_ + to binary.""" + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + """Helper class for writing the `global subroutine INDEX `_ + to binary.""" + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + +class SubrsCompiler(GlobalSubrsCompiler): + """Helper class for writing the `local subroutine INDEX `_ + to binary.""" + + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + + +class CharStringsCompiler(GlobalSubrsCompiler): + """Helper class for writing the `CharStrings INDEX `_ + to binary.""" + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + def setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + """This class represents what the CFF spec calls an INDEX (an array of + variable-sized objects). 
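+    Items are loaded lazily: constructing an ``Index`` from a file records
+    only the item offsets, and each item is parsed the first time it is
+    accessed.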
`Index` items can be addressed and set using
+    Python list indexing."""
+
+    compilerClass = IndexCompiler
+
+    def __init__(self, file=None, isCFF2=None):
+        self.items = []
+        self.offsets = offsets = []
+        name = self.__class__.__name__
+        if file is None:
+            return
+        self._isCFF2 = isCFF2
+        log.log(DEBUG, "loading %s at %s", name, file.tell())
+        self.file = file
+        if isCFF2:
+            count = readCard32(file)
+        else:
+            count = readCard16(file)
+        if count == 0:
+            return
+        self.items = [None] * count
+        offSize = readCard8(file)
+        log.log(DEBUG, "    index count: %s offSize: %s", count, offSize)
+        assert offSize <= 4, "offSize too large: %s" % offSize
+        pad = b"\0" * (4 - offSize)
+        for index in range(count + 1):
+            chunk = file.read(offSize)
+            chunk = pad + chunk
+            (offset,) = struct.unpack(">L", chunk)
+            offsets.append(int(offset))
+        self.offsetBase = file.tell() - 1
+        file.seek(self.offsetBase + offsets[-1])  # pretend we've read the whole lot
+        log.log(DEBUG, "    end of %s at %s", name, file.tell())
+
+    def __len__(self):
+        return len(self.items)
+
+    def __getitem__(self, index):
+        item = self.items[index]
+        if item is not None:
+            return item
+        offset = self.offsets[index] + self.offsetBase
+        size = self.offsets[index + 1] - self.offsets[index]
+        file = self.file
+        file.seek(offset)
+        data = file.read(size)
+        assert len(data) == size
+        item = self.produceItem(index, data, file, offset)
+        self.items[index] = item
+        return item
+
+    def __setitem__(self, index, item):
+        self.items[index] = item
+
+    def produceItem(self, index, data, file, offset):
+        return data
+
+    def append(self, item):
+        """Add an item to an INDEX."""
+        self.items.append(item)
+
+    def getCompiler(self, strings, parent, isCFF2=None):
+        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
+
+    def clear(self):
+        """Empty the INDEX."""
+        del self.items[:]
+
+
+class GlobalSubrsIndex(Index):
+    """This index contains all the global subroutines in the font. A global
+    subroutine is a set of ``CharString`` data which is accessible to any
+    glyph in the font, and is used to store repeated instructions; for
+    example, components may be encoded as global subroutines, but so could
+    hinting instructions.
+
+    Remember that when interpreting a ``callgsubr`` instruction (or indeed
+    a ``callsubr`` instruction) you will need to add the "subroutine
+    number bias" to the number given:
+
+    .. code:: python
+
+        tt = ttLib.TTFont("Almendra-Bold.otf")
+        u = tt["CFF "].cff[0].CharStrings["udieresis"]
+        u.decompile()
+
+        u.toXML(XMLWriter(sys.stdout))
+        #
+        #   -64 callgsubr <-- Subroutine which implements the dieresis mark
+        #
+
+        tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
+        #
+
+        tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
+        #
+
+    ("The bias applied depends on the number of subrs (gsubrs). If the number of
+    subrs (gsubrs) is less than 1240, the bias is 107.
Otherwise if it is less + than 33900, it is 1131; otherwise it is 32768.", + `Subroutine Operators `) + """ + + compilerClass = GlobalSubrsCompiler + subrClass = psCharStrings.T2CharString + charStringClass = psCharStrings.T2CharString + + def __init__( + self, + file=None, + globalSubrs=None, + private=None, + fdSelect=None, + fdArray=None, + isCFF2=None, + ): + super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + + def produceItem(self, index, data, file, offset): + if self.private is not None: + private = self.private + elif hasattr(self, "fdArray") and self.fdArray is not None: + if hasattr(self, "fdSelect") and self.fdSelect is not None: + fdIndex = self.fdSelect[index] + else: + fdIndex = 0 + private = self.fdArray[fdIndex].Private + else: + private = None + return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter): + """Write the subroutines index into XML representation onto the given + :class:`fontTools.misc.xmlWriter.XMLWriter`. + + .. code:: python + + writer = xmlWriter.XMLWriter(sys.stdout) + tt["CFF "].cff[0].GlobalSubrs.toXML(writer) + + """ + xmlWriter.comment( + "The 'index' attribute is only for humans; " "it is ignored when parsed." + ) + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = self.subrClass() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, "fdSelect"): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + """This index contains a glyph's local subroutines. A local subroutine is a + private set of ``CharString`` data which is accessible only to the glyph to + which the index is attached.""" + + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + """This index represents the array of ``TopDict`` structures in the font + (again, usually only one entry is present). Hence the following calls are + equivalent: + + .. 
code:: python + + tt["CFF "].cff[0] + # + tt["CFF "].cff.topDictIndex[0] + # + + """ + + compilerClass = TopDictIndexCompiler + + def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None): + self.cff2GetGlyphOrder = cff2GetGlyphOrder + if file is not None and isCFF2: + self._isCFF2 = isCFF2 + self.items = [] + name = self.__class__.__name__ + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + count = 1 + self.items = [None] * count + self.offsets = [0, topSize] + self.offsetBase = file.tell() + # pretend we've read the whole lot + file.seek(self.offsetBase + topSize) + log.log(DEBUG, " end of %s at %s", name, file.tell()) + else: + super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) + + def produceItem(self, index, data, file, offset): + top = TopDict( + self.strings, + file, + offset, + self.GlobalSubrs, + self.cff2GetGlyphOrder, + isCFF2=self._isCFF2, + ) + top.decompile(data) + return top + + def toXML(self, xmlWriter): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(Index): + compilerClass = FDArrayIndexCompiler + + def toXML(self, xmlWriter): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + def produceItem(self, index, data, file, offset): + fontDict = FontDict( + self.strings, + file, + offset, + self.GlobalSubrs, + isCFF2=self._isCFF2, + vstore=self.vstore, + ) + fontDict.decompile(data) + return fontDict + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class VarStoreData(object): + def __init__(self, file=None, otVarStore=None): + self.file = file + self.data = None + self.otVarStore = otVarStore + self.font = TTFont() # dummy font for the decompile function. + + def decompile(self): + if self.file: + # read data in from file. Assume position is correct. 
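+            # The block begins with a Card16 length prefix; 0xFFFF is an
+            # escape value meaning the data is too long for the prefix, in
+            # which case the rest of the stream is read.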
+ length = readCard16(self.file) + # https://github.com/fonttools/fonttools/issues/3673 + if length == 65535: + self.data = self.file.read() + else: + self.data = self.file.read(length) + globalState = {} + reader = OTTableReader(self.data, globalState) + self.otVarStore = ot.VarStore() + self.otVarStore.decompile(reader, self.font) + self.data = None + return self + + def compile(self): + writer = OTTableWriter() + self.otVarStore.compile(writer, self.font) + # Note that this omits the initial Card16 length from the CFF2 + # VarStore data block + self.data = writer.getAllData() + + def writeXML(self, xmlWriter, name): + self.otVarStore.toXML(xmlWriter, self.font) + + def xmlRead(self, name, attrs, content, parent): + self.otVarStore = ot.VarStore() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + self.otVarStore.fromXML(name, attrs, content, self.font) + else: + pass + return None + + def __len__(self): + return len(self.data) + + def getNumRegions(self, vsIndex): + if vsIndex is None: + vsIndex = 0 + varData = self.otVarStore.VarData[vsIndex] + numRegions = varData.VarRegionCount + return numRegions + + +class FDSelect(object): + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + elif self.format == 4: + gidArray = [None] * numGlyphs + nRanges = readCard32(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard32(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard16(file) + if prev is not None: + first = readCard32(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray, and leave format as passed in. + # format is None will result in the smallest representation being used. + self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + """The ``CharStrings`` in the font represent the instructions for drawing + each glyph. This object presents a dictionary interface to the font's + CharStrings, indexed by glyph name: + + .. code:: python + + tt["CFF "].cff[0].CharStrings["a"] + # + + See :class:`fontTools.misc.psCharStrings.T1CharString` and + :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile, + compile and interpret the glyph drawing instructions in the returned objects. 
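+
+    For example, iterating and testing membership work as with a regular
+    dictionary (a sketch; ``tt`` is assumed to be a loaded ``TTFont`` and
+    glyph names vary per font):
+
+    .. code:: python
+
+        cs = tt["CFF "].cff[0].CharStrings
+        "a" in cs
+        # True
+        for name in cs.keys():
+            cs[name].decompile()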
+ + """ + + def __init__( + self, + file, + charset, + globalSubrs, + private, + fdSelect, + fdArray, + isCFF2=None, + varStore=None, + ): + self.globalSubrs = globalSubrs + self.varStore = varStore + if file is not None: + self.charStringsIndex = SubrsIndex( + file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2 + ) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + # read from OTF file: charStrings.values() are indices into + # charStringsIndex. + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + # read from ttx file: charStrings.values() are actual charstrings + self.charStringsAreIndexed = 0 + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, "fdArray"): + if hasattr(self, "fdSelect"): + sel = self.charStrings[name].fdSelectIndex + else: + sel = 0 + else: + sel = None + return self.charStrings[name], sel + + def toXML(self, xmlWriter): + names = sorted(self.keys()) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [("name", name)] + raw) + else: + xmlWriter.begintag( + "CharString", + [("name", name), ("fdSelectIndex", fdSelectIndex)] + raw, + ) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + try: + fdID = safeEval(attrs["fdSelectIndex"]) + except KeyError: + fdID = 0 + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charStringClass = psCharStrings.T2CharString + charString = charStringClass(private=private, globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + + +def readCard16(file): + (value,) = struct.unpack(">H", file.read(2)) + return value + + +def readCard32(file): + (value,) = struct.unpack(">L", file.read(4)) + return value + + +def writeCard8(file, value): + file.write(bytechr(value)) + + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + + +def writeCard32(file, value): + file.write(struct.pack(">L", value)) + + +def packCard8(value): + return bytechr(value) + + +def 
packCard16(value): + return struct.pack(">H", value) + + +def packCard32(value): + return struct.pack(">L", value) + + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: + if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + + +def buildOrder(table): + l = [] + for op, name, arg, default, conv in table: + l.append(name) + return l + + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + def read(self, parent, value): + if not hasattr(parent, "file"): + return self._read(parent, value) + file = parent.file + pos = file.tell() + try: + return self._read(parent, value) + finally: + file.seek(pos) + + def _read(self, parent, value): + return value + + def write(self, parent, value): + return value + + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + + +class ASCIIConverter(SimpleConverter): + def _read(self, parent, value): + return tostr(value, encoding="ascii") + + def write(self, parent, value): + return tobytes(value, encoding="ascii") + + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, value=tostr(value, encoding="ascii")) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + + +class Latin1Converter(SimpleConverter): + def _read(self, parent, value): + return tostr(value, encoding="latin1") + + def write(self, parent, value): + return tobytes(value, encoding="latin1") + + def xmlWrite(self, xmlWriter, name, value): + value = tostr(value, encoding="latin1") + if name in ["Notice", "Copyright"]: + value = re.sub(r"[\r\n]\s+", " ", value) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + + +def parseBlendList(s): + valueList = [] + for element in s: + if isinstance(element, str): + continue + name, attrs, content = element + blendList = attrs["value"].split() + blendList = [eval(val) for val in blendList] + valueList.append(blendList) + if len(valueList) == 1: + valueList = valueList[0] + return valueList + + +class NumberConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + if isinstance(value, list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + blendValue = " ".join([str(val) for val in value]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + value = parseBlendList(content) + else: + value = parseNum(attrs["value"]) + return value + + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + if value and isinstance(value[0], 
list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + for valueList in value: + blendValue = " ".join([str(val) for val in valueList]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + value = " ".join([str(val) for val in value]) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + valueList = parseBlendList(content) + else: + values = valueString.split() + valueList = [parseNum(value) for value in values] + return valueList + + +class TableConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + + +class PrivateDictConverter(TableConverter): + def getClass(self): + return PrivateDict + + def _read(self, parent, value): + size, offset = value + file = parent.file + isCFF2 = parent._isCFF2 + try: + vstore = parent.vstore + except AttributeError: + vstore = None + priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + + def write(self, parent, value): + return (0, 0) # dummy value + + +class SubrsConverter(TableConverter): + def getClass(self): + return SubrsIndex + + def _read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file, isCFF2=isCFF2) + + def write(self, parent, value): + return 0 # dummy value + + +class CharStringsConverter(TableConverter): + def _read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + charset = parent.charset + varStore = getattr(parent, "VarStore", None) + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "FDArray"): + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + charStrings = CharStrings( + file, + charset, + globalSubrs, + private, + fdSelect, + fdArray, + isCFF2=isCFF2, + varStore=varStore, + ) + return charStrings + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "FDArray"): + # if it is a CID-keyed font, then the private Dict is extracted from the + # parent.FDArray + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + # if it is a name-keyed font, then the private dict is in the top dict, + # and + # there is no fdArray. 
+ private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings( + None, + None, + parent.GlobalSubrs, + private, + fdSelect, + fdArray, + varStore=getattr(parent, "VarStore", None), + ) + charStrings.fromXML(name, attrs, content) + return charStrings + + +class CharsetConverter(SimpleConverter): + def _read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + log.log(DEBUG, "loading charset at %s", value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + log.log(DEBUG, " charset end at %s", file.tell()) + # make sure glyph names are unique + allNames = {} + newCharset = [] + for glyphName in charset: + if glyphName in allNames: + # make up a new glyphName that's unique + n = allNames[glyphName] + names = set(allNames) | set(charset) + while (glyphName + "." + str(n)) in names: + n += 1 + allNames[glyphName] = n + 1 + glyphName = glyphName + "." + str(n) + allNames[glyphName] = 1 + newCharset.append(glyphName) + charset = newCharset + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + # We get here only when processing fontDicts from the FDArray of + # CFF-CID fonts. Only the real topDict references the charset. + assert value == 0 + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + if charset and (len(charset) != parent.numGlyphs): + charset = charset[: parent.numGlyphs] + return charset + + def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. + # # xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + pass + + +class CharsetCompiler(object): + def __init__(self, strings, charset, parent): + assert charset[0] == ".notdef" + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getStdCharSet(charset): + # check to see if we can use a predefined charset value. 
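+    # The CFF spec predefines three charsets, selected by storing 0
+    # (ISOAdobe), 1 (Expert) or 2 (ExpertSubset) in place of a charset
+    # offset; a charset that is a prefix of one of them needs no data.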
+ predefinedCharSetVal = None + predefinedCharSets = [ + (cffISOAdobeStringCount, cffISOAdobeStrings, 0), + (cffExpertStringCount, cffIExpertStrings, 1), + (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2), + ] + lcs = len(charset) + for cnt, pcs, csv in predefinedCharSets: + if predefinedCharSetVal is not None: + break + if lcs > cnt: + continue + predefinedCharSetVal = csv + for i in range(lcs): + if charset[i] != pcs[i]: + predefinedCharSetVal = None + break + return predefinedCharSetVal + + +def getCIDfromName(name, strings): + return int(name[3:]) + + +def getSIDfromName(name, strings): + return strings.getSID(name) + + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name, strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = [".notdef"] + count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first + nLeft + 1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first + nLeft + 1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, str) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + def _read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + # custom encoding at offset `value` + assert value > 1 + file = parent.file + file.seek(value) + log.log(DEBUG, "loading Encoding at %s", value) + fmt = readCard8(file) + haveSupplement = bool(fmt & 0x80) + fmt = fmt & 0x7F + + if fmt == 0: + encoding = parseEncoding0(parent.charset, file) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file) + else: + raise ValueError(f"Unknown Encoding format: {fmt}") + + if 
haveSupplement: + parseEncodingSupplement(file, encoding, parent.strings) + + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == "ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + xmlWriter.newline() + for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def readSID(file): + """Read a String ID (SID) — 2-byte unsigned integer.""" + data = file.read(2) + if len(data) != 2: + raise EOFError("Unexpected end of file while reading SID") + return struct.unpack(">H", data)[0] # big-endian uint16 + + +def parseEncodingSupplement(file, encoding, strings): + """ + Parse the CFF Encoding supplement data: + - nSups: number of supplementary mappings + - each mapping: (code, SID) pair + and apply them to the `encoding` list in place. + """ + nSups = readCard8(file) + for _ in range(nSups): + code = readCard8(file) + sid = readSID(file) + name = strings[sid] + encoding[code] = name + + +def parseEncoding0(charset, file): + """ + Format 0: simple list of codes. + After reading the base table, optionally parse the supplement. + """ + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + + return encoding + + +def parseEncoding1(charset, file): + """ + Format 1: range-based encoding. + After reading the base ranges, optionally parse the supplement. + """ + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for _ in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for _ in range(nLeft + 1): + encoding[code] = charset[glyphID] + code += 1 + glyphID += 1 + + return encoding + + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + nLeft = end - first + ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. 
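+    # (unencoded glyphs were assigned the dummy code -1 above, so trailing
+    # ranges starting at -1 carry no information)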
+ while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + def _read(self, parent, value): + try: + vstore = parent.VarStore + except AttributeError: + vstore = None + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(value) + fdArray = FDArrayIndex(file, isCFF2=isCFF2) + fdArray.vstore = vstore + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(SimpleConverter): + def _read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, [("format", value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +class VarStoreConverter(SimpleConverter): + def _read(self, parent, value): + file = parent.file + file.seek(value) + varStore = VarStoreData(file) + varStore.decompile() + return varStore + + def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + value.writeXML(xmlWriter, name) + + def xmlRead(self, name, attrs, content, parent): + varStore = VarStoreData() + varStore.xmlRead(name, attrs, content, parent) + return varStore + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +def packFDSelect4(fdSelectArray): + fmt = 4 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard32(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard32(fdRange[0])) + data.append(packCard16(fdRange[1])) + data.append(packCard32(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + elif 
fmt == 4: + self.data = packFDSelect4(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class VarStoreCompiler(object): + def __init__(self, varStoreData, parent): + self.parent = parent + if not varStoreData.data: + varStoreData.compile() + varStoreDataLen = min(0xFFFF, len(varStoreData.data)) + data = [packCard16(varStoreDataLen), varStoreData.data] + self.data = bytesjoin(data) + + def setPos(self, pos, endPos): + self.parent.rawDict["VarStore"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + registry, order, supplement = value + xmlWriter.simpletag( + name, + [ + ("Registry", tostr(registry)), + ("Order", tostr(order)), + ("Supplement", supplement), + ], + ) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"])) + + +topDictOperators = [ + # opcode name argument type default converter + (25, "maxstack", "number", None, None), + ((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()), + ((12, 20), "SyntheticBase", "number", None, None), + (0, "version", "SID", None, None), + (1, "Notice", "SID", None, Latin1Converter()), + ((12, 0), "Copyright", "SID", None, Latin1Converter()), + (2, "FullName", "SID", None, Latin1Converter()), + ((12, 38), "FontName", "SID", None, Latin1Converter()), + (3, "FamilyName", "SID", None, Latin1Converter()), + (4, "Weight", "SID", None, None), + ((12, 1), "isFixedPitch", "number", 0, None), + ((12, 2), "ItalicAngle", "number", 0, None), + ((12, 3), "UnderlinePosition", "number", -100, None), + ((12, 4), "UnderlineThickness", "number", 50, None), + ((12, 5), "PaintType", "number", 0, None), + ((12, 6), "CharstringType", "number", 2, None), + ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None), + (13, "UniqueID", "number", None, None), + (5, "FontBBox", "array", [0, 0, 0, 0], None), + ((12, 8), "StrokeWidth", "number", 0, None), + (14, "XUID", "array", None, None), + ((12, 21), "PostScript", "SID", None, None), + ((12, 22), "BaseFontName", "SID", None, None), + ((12, 23), "BaseFontBlend", "delta", None, None), + ((12, 31), "CIDFontVersion", "number", 0, None), + ((12, 32), "CIDFontRevision", "number", 0, None), + ((12, 33), "CIDFontType", "number", 0, None), + ((12, 34), "CIDCount", "number", 8720, None), + (15, "charset", "number", None, CharsetConverter()), + ((12, 35), "UIDBase", "number", None, None), + (16, "Encoding", "number", 0, EncodingConverter()), + (18, "Private", ("number", "number"), None, PrivateDictConverter()), + ((12, 37), "FDSelect", "number", None, FDSelectConverter()), + ((12, 36), "FDArray", "number", None, FDArrayConverter()), + (17, "CharStrings", "number", None, CharStringsConverter()), + (24, "VarStore", "number", None, VarStoreConverter()), +] + +topDictOperators2 = [ + # opcode name argument type default converter + (25, "maxstack", "number", None, None), + ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None), + ((12, 37), "FDSelect", 
"number", None, FDSelectConverter()), + ((12, 36), "FDArray", "number", None, FDArrayConverter()), + (17, "CharStrings", "number", None, CharStringsConverter()), + (24, "VarStore", "number", None, VarStoreConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. + +kBlendDictOpName = "blend" +blendOp = 23 + +privateDictOperators = [ + # opcode name argument type default converter + (22, "vsindex", "number", None, None), + ( + blendOp, + kBlendDictOpName, + "blendList", + None, + None, + ), # This is for reading to/from XML: it not written to CFF. + (6, "BlueValues", "delta", None, None), + (7, "OtherBlues", "delta", None, None), + (8, "FamilyBlues", "delta", None, None), + (9, "FamilyOtherBlues", "delta", None, None), + ((12, 9), "BlueScale", "number", 0.039625, None), + ((12, 10), "BlueShift", "number", 7, None), + ((12, 11), "BlueFuzz", "number", 1, None), + (10, "StdHW", "number", None, None), + (11, "StdVW", "number", None, None), + ((12, 12), "StemSnapH", "delta", None, None), + ((12, 13), "StemSnapV", "delta", None, None), + ((12, 14), "ForceBold", "number", 0, None), + ((12, 15), "ForceBoldThreshold", "number", None, None), # deprecated + ((12, 16), "lenIV", "number", None, None), # deprecated + ((12, 17), "LanguageGroup", "number", 0, None), + ((12, 18), "ExpansionFactor", "number", 0.06, None), + ((12, 19), "initialRandomSeed", "number", 0, None), + (20, "defaultWidthX", "number", 0, None), + (21, "nominalWidthX", "number", 0, None), + (19, "Subrs", "number", None, SubrsConverter()), +] + +privateDictOperators2 = [ + # opcode name argument type default converter + (22, "vsindex", "number", None, None), + ( + blendOp, + kBlendDictOpName, + "blendList", + None, + None, + ), # This is for reading to/from XML: it not written to CFF. 
+ (6, "BlueValues", "delta", None, None), + (7, "OtherBlues", "delta", None, None), + (8, "FamilyBlues", "delta", None, None), + (9, "FamilyOtherBlues", "delta", None, None), + ((12, 9), "BlueScale", "number", 0.039625, None), + ((12, 10), "BlueShift", "number", 7, None), + ((12, 11), "BlueFuzz", "number", 1, None), + (10, "StdHW", "number", None, None), + (11, "StdVW", "number", None, None), + ((12, 12), "StemSnapH", "delta", None, None), + ((12, 13), "StemSnapV", "delta", None, None), + ((12, 17), "LanguageGroup", "number", 0, None), + ((12, 18), "ExpansionFactor", "number", 0.06, None), + (19, "Subrs", "number", None, SubrsConverter()), +] + + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + elif arg == "blendList": + conv = None + else: + assert False + table[i] = op, name, arg, default, conv + + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + maxBlendStack = 0 + + def __init__(self, dictObj, strings, parent, isCFF2=None): + if strings: + assert isinstance(strings, IndexedStrings) + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return len(self.compile("getDataLength")) + + def compile(self, reason): + log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + data = bytesjoin(data) + return data + + def toFile(self, file): + data = self.compile("toFile") + file.write(data) + + def arg_number(self, num): + if isinstance(num, list): + data = [encodeNumber(val) for val in num] + data.append(encodeNumber(1)) + data.append(bytechr(blendOp)) + datum = bytesjoin(data) + else: + datum = encodeNumber(num) + return datum + + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + + def arg_array(self, value): + data = [] + for num in value: + data.append(self.arg_number(num)) + return bytesjoin(data) + + def arg_delta(self, value): + if not value: + return b"" + val0 = value[0] + if isinstance(val0, list): + data = self.arg_delta_blend(value) + else: + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] 
for num in out:
+                data.append(encodeNumber(num))
+        return bytesjoin(data)
+
+    def arg_delta_blend(self, value):
+        """A delta list with blend lists has to be *all* blend lists.
+
+        The value is a list arranged as follows::
+
+            [
+                [V0, d0..dn]
+                [V1, d0..dn]
+                ...
+                [Vm, d0..dn]
+            ]
+
+        Each ``V`` is an absolute coordinate value from the default font,
+        and ``d0-dn`` are the delta values from the *n* regions.
+
+        We want to return a list::
+
+            [
+                [v0, v1..vm]
+                [d0..dn]
+                ...
+                [d0..dn]
+                numBlends
+                blendOp
+            ]
+
+        where each ``v`` is relative to the previous default font value.
+        """
+        numMasters = len(value[0])
+        numBlends = len(value)
+        numStack = (numBlends * numMasters) + 1
+        if numStack > self.maxBlendStack:
+            # Figure out the max number of values we can blend
+            # and divide this list up into chunks of that size.
+
+            numBlendValues = int((self.maxBlendStack - 1) / numMasters)
+            out = []
+            while True:
+                numVal = min(len(value), numBlendValues)
+                if numVal == 0:
+                    break
+                valList = value[0:numVal]
+                out1 = self.arg_delta_blend(valList)
+                out.extend(out1)
+                value = value[numVal:]
+        else:
+            firstList = [0] * numBlends
+            deltaList = [None] * numBlends
+            i = 0
+            prevVal = 0
+            while i < numBlends:
+                # For PrivateDict BlueValues, the default font
+                # values are absolute, not relative.
+                # Must convert these back to relative coordinates
+                # before writing to CFF2.
+                defaultValue = value[i][0]
+                firstList[i] = defaultValue - prevVal
+                prevVal = defaultValue
+                deltaList[i] = value[i][1:]
+                i += 1
+
+            relValueList = firstList
+            for blendList in deltaList:
+                relValueList.extend(blendList)
+            out = [encodeNumber(val) for val in relValueList]
+            out.append(encodeNumber(numBlends))
+            out.append(bytechr(blendOp))
+        return out
+
+
+def encodeNumber(num):
+    if isinstance(num, float):
+        return psCharStrings.encodeFloat(num)
+    else:
+        return psCharStrings.encodeIntCFF(num)
+
+
+class TopDictCompiler(DictCompiler):
+    opcodes = buildOpcodeDict(topDictOperators)
+
+    def getChildren(self, strings):
+        isCFF2 = self.isCFF2
+        children = []
+        if self.dictObj.cff2GetGlyphOrder is None:
+            if hasattr(self.dictObj, "charset") and self.dictObj.charset:
+                if hasattr(self.dictObj, "ROS"):  # aka isCID
+                    charsetCode = None
+                else:
+                    charsetCode = getStdCharSet(self.dictObj.charset)
+                if charsetCode is None:
+                    children.append(
+                        CharsetCompiler(strings, self.dictObj.charset, self)
+                    )
+                else:
+                    self.rawDict["charset"] = charsetCode
+            if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
+                encoding = self.dictObj.Encoding
+                if not isinstance(encoding, str):
+                    children.append(EncodingCompiler(strings, encoding, self))
+        else:
+            if hasattr(self.dictObj, "VarStore"):
+                varStoreData = self.dictObj.VarStore
+                varStoreComp = VarStoreCompiler(varStoreData, self)
+                children.append(varStoreComp)
+        if hasattr(self.dictObj, "FDSelect"):
+            # I have not yet supported merging a ttx CFF-CID font, as there are
+            # interesting issues about merging the FDArrays. Here I assume that
+            # either the font was read from XML, and the FDSelect indices are all
+            # in the charstring data, or the FDSelect array is already fully defined.
+            fdSelect = self.dictObj.FDSelect
+            # probably read in from XML; assume fdIndex in CharString data
+            if len(fdSelect) == 0:
+                charStrings = self.dictObj.CharStrings
+                for name in self.dictObj.charset:
+                    fdSelect.append(charStrings[name].fdSelectIndex)
+            fdSelectComp = FDSelectCompiler(fdSelect, self)
+            children.append(fdSelectComp)
+        if hasattr(self.dictObj, "CharStrings"):
+            items = []
+            charStrings = self.dictObj.CharStrings
+            for name in self.dictObj.charset:
+                items.append(charStrings[name])
+            charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2)
+            children.append(charStringsComp)
+        if hasattr(self.dictObj, "FDArray"):
+            # I have not yet supported merging a ttx CFF-CID font, as there are
+            # interesting issues about merging the FDArrays. Here I assume that the
+            # FDArray info is correct and complete.
+            fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
+            children.append(fdArrayIndexComp)
+            children.extend(fdArrayIndexComp.getChildren(strings))
+        if hasattr(self.dictObj, "Private"):
+            privComp = self.dictObj.Private.getCompiler(strings, self)
+            children.append(privComp)
+            children.extend(privComp.getChildren(strings))
+        return children
+
+
+class FontDictCompiler(DictCompiler):
+    opcodes = buildOpcodeDict(topDictOperators)
+
+    def __init__(self, dictObj, strings, parent, isCFF2=None):
+        super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
+        #
+        # We now take some effort to detect whether there were any key/value
+        # pairs supplied that were ignored in the FontDict context, and issue
+        # a warning for those cases.
+        #
+        ignoredNames = []
+        dictObj = self.dictObj
+        for name in sorted(set(dictObj.converters) - set(dictObj.order)):
+            if name in dictObj.rawDict:
+                # The font was directly read from binary. In this
+                # case, we want to report *all* "useless" key/value
+                # pairs that are in the font, not just the ones that
+                # are different from the default.
+                ignoredNames.append(name)
+            else:
+                # The font was probably read from a TTX file. We only
+                # warn about keys whose value is not the default. The
+                # ones that have the default value will not be written
+                # to binary anyway.
+                default = dictObj.defaults.get(name)
+                if default is not None:
+                    conv = dictObj.converters[name]
+                    default = conv.read(dictObj, default)
+                if getattr(dictObj, name, None) != default:
+                    ignoredNames.append(name)
+        if ignoredNames:
+            log.warning(
+                "Some CFF FDArray/FontDict keys were ignored upon compile: "
+                + " ".join(sorted(ignoredNames))
+            )
+
+    def getChildren(self, strings):
+        children = []
+        if hasattr(self.dictObj, "Private"):
+            privComp = self.dictObj.Private.getCompiler(strings, self)
+            children.append(privComp)
+            children.extend(privComp.getChildren(strings))
+        return children
+
+
+class PrivateDictCompiler(DictCompiler):
+    maxBlendStack = maxStackLimit
+    opcodes = buildOpcodeDict(privateDictOperators)
+
+    def setPos(self, pos, endPos):
+        size = endPos - pos
+        self.parent.rawDict["Private"] = size, pos
+        self.pos = pos
+
+    def getChildren(self, strings):
+        children = []
+        if hasattr(self.dictObj, "Subrs"):
+            children.append(self.dictObj.Subrs.getCompiler(strings, self))
+        return children
+
+
+class BaseDict(object):
+    def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
+        assert (isCFF2 is None) == (file is None)
+        self.rawDict = {}
+        self.skipNames = []
+        self.strings = strings
+        if file is None:
+            return
+        self._isCFF2 = isCFF2
+        self.file = file
+        if offset is not None:
+            log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
+            self.offset = offset
+
+    def decompile(self, data):
+        log.log(DEBUG, "    length %s is %d", self.__class__.__name__, len(data))
+        dec = self.decompilerClass(self.strings, self)
+        dec.decompile(data)
+        self.rawDict = dec.getDict()
+        self.postDecompile()
+
+    def postDecompile(self):
+        pass
+
+    def getCompiler(self, strings, parent, isCFF2=None):
+        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
+
+    def __getattr__(self, name):
+        if name[:2] == name[-2:] == "__":
+            # to make deepcopy() and pickle.load() work, we need to signal with
+            # AttributeError that dunder methods like '__deepcopy__' or
+            # '__getstate__' aren't implemented. For more details, see:
+            # https://github.com/fonttools/fonttools/pull/1488
+            raise AttributeError(name)
+        value = self.rawDict.get(name, None)
+        if value is None:
+            value = self.defaults.get(name)
+        if value is None:
+            raise AttributeError(name)
+        conv = self.converters[name]
+        value = conv.read(self, value)
+        setattr(self, name, value)
+        return value
+
+    def toXML(self, xmlWriter):
+        for name in self.order:
+            if name in self.skipNames:
+                continue
+            value = getattr(self, name, None)
+            # XXX For "charset" we never skip calling xmlWrite even if the
+            # value is None, so we always write the following XML comment:
+            #
+            # <!-- charset is dumped separately as the 'GlyphOrder' element -->
+            #
+            # Charset is None when 'CFF ' table is imported from XML into an
+            # empty TTFont(). By writing this comment all the time, we obtain
+            # the same XML output whether roundtripping XML-to-XML or
+            # dumping binary-to-XML
+            if value is None and name != "charset":
+                continue
+            conv = self.converters[name]
+            conv.xmlWrite(xmlWriter, name, value)
+        ignoredNames = set(self.rawDict) - set(self.order)
+        if ignoredNames:
+            xmlWriter.comment(
+                "some keys were ignored: %s" % " ".join(sorted(ignoredNames))
+            )
+            xmlWriter.newline()
+
+    def fromXML(self, name, attrs, content):
+        conv = self.converters[name]
+        value = conv.xmlRead(name, attrs, content, self)
+        setattr(self, name, value)
+
+
+class TopDict(BaseDict):
+    """The ``TopDict`` represents the top-level dictionary holding font
+    information.
CFF2 tables contain a restricted set of top-level entries + as described `here `_, + but CFF tables may contain a wider range of information. This information + can be accessed through attributes or through the dictionary returned + through the ``rawDict`` property: + + .. code:: python + + font = tt["CFF "].cff[0] + font.FamilyName + # 'Linux Libertine O' + font.rawDict["FamilyName"] + # 'Linux Libertine O' + + More information is available in the CFF file's private dictionary, accessed + via the ``Private`` property: + + .. code:: python + + tt["CFF "].cff[0].Private.BlueValues + # [-15, 0, 515, 515, 666, 666] + + """ + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + compilerClass = TopDictCompiler + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + + def __init__( + self, + strings=None, + file=None, + offset=None, + GlobalSubrs=None, + cff2GetGlyphOrder=None, + isCFF2=None, + ): + super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + self.GlobalSubrs = GlobalSubrs + if isCFF2: + self.defaults = buildDefaults(topDictOperators2) + self.charset = cff2GetGlyphOrder() + self.order = buildOrder(topDictOperators2) + else: + self.defaults = buildDefaults(topDictOperators) + self.order = buildOrder(topDictOperators) + + def getGlyphOrder(self): + """Returns a list of glyph names in the CFF font.""" + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number of glyphs beforehand. + self.file.seek(offset) + if self._isCFF2: + self.numGlyphs = readCard32(self.file) + else: + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings() + if hasattr(self, "ROS"): + self.skipNames = ["Encoding"] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = [ + "CIDFontVersion", + "CIDFontRevision", + "CIDFontType", + "CIDCount", + ] + BaseDict.toXML(self, xmlWriter) + + def decompileAllCharStrings(self): + # Make sure that all the Private Dicts have been instantiated. + for i, charString in enumerate(self.CharStrings.values()): + try: + charString.decompile() + except: + log.error("Error in charstring %s", i) + raise + + def recalcFontBBox(self): + fontBBox = None + for charString in self.CharStrings.values(): + bounds = charString.calcBounds(self.CharStrings) + if bounds is not None: + if fontBBox is not None: + fontBBox = unionRect(fontBBox, bounds) + else: + fontBBox = bounds + + if fontBBox is None: + self.FontBBox = self.defaults["FontBBox"][:] + else: + self.FontBBox = list(intRect(fontBBox)) + + +class FontDict(BaseDict): + # + # Since fonttools used to pass a lot of fields that are not relevant in the FDArray + # FontDict, there are 'ttx' files in the wild that contain all these. These got in + # the ttx files because fonttools writes explicit values for all the TopDict default + # values. These are not actually illegal in the context of an FDArray FontDict - you + # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are + # useless since current major company CFF interpreters ignore anything but the set + # listed in this file. So, we just silently skip them. 
An exception is Weight: this + # is not used by any interpreter, but some foundries have asked that this be + # supported in FDArray FontDicts just to preserve information about the design when + # the font is being inspected. + # + # On top of that, there are fonts out there that contain such useless FontDict values. + # + # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading + # from binary or when reading from XML, but by overriding `order` with a limited + # list of names, we ensure that only the useful names ever get exported to XML and + # ever get compiled into the binary font. + # + # We override compilerClass so we can warn about "useless" key/value pairs, either + # from the original binary font or from TTX input. + # + # See: + # - https://github.com/fonttools/fonttools/issues/740 + # - https://github.com/fonttools/fonttools/issues/601 + # - https://github.com/adobe-type-tools/afdko/issues/137 + # + defaults = {} + converters = buildConverters(topDictOperators) + compilerClass = FontDictCompiler + orderCFF = ["FontName", "FontMatrix", "Weight", "Private"] + orderCFF2 = ["Private"] + decompilerClass = TopDictDecompiler + + def __init__( + self, + strings=None, + file=None, + offset=None, + GlobalSubrs=None, + isCFF2=None, + vstore=None, + ): + super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + self.setCFF2(isCFF2) + + def setCFF2(self, isCFF2): + # isCFF2 may be None. + if isCFF2: + self.order = self.orderCFF2 + self._isCFF2 = True + else: + self.order = self.orderCFF + self._isCFF2 = False + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None): + super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + if isCFF2: + self.defaults = buildDefaults(privateDictOperators2) + self.order = buildOrder(privateDictOperators2) + # Provide dummy values. This avoids needing to provide + # an isCFF2 state in a lot of places. + self.nominalWidthX = self.defaultWidthX = None + self._isCFF2 = True + else: + self.defaults = buildDefaults(privateDictOperators) + self.order = buildOrder(privateDictOperators) + self._isCFF2 = False + + @property + def in_cff2(self): + return self._isCFF2 + + def getNumRegions(self, vi=None): # called from misc/psCharStrings.py + # if getNumRegions is being called, we can assume that VarStore exists. 
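+        # e.g. if VarData item 0 of the VarStore has two regions, then with no
+        # vsindex set, getNumRegions() == self.vstore.getNumRegions(0) == 2
+        # (illustrative values).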
+ if vi is None: + if hasattr(self, "vsindex"): + vi = self.vsindex + else: + vi = 0 + numRegions = self.vstore.getNumRegions(vi) + return numRegions + + +class IndexedStrings(object): + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, self, isCFF2=False) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + s = tostr(s, encoding="latin1") + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. +# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = [ + ".notdef", + "space", + "exclam", + "quotedbl", + "numbersign", + "dollar", + "percent", + "ampersand", + "quoteright", + "parenleft", + "parenright", + "asterisk", + "plus", + "comma", + "hyphen", + "period", + "slash", + "zero", + "one", + "two", + "three", + "four", + "five", + "six", + "seven", + "eight", + "nine", + "colon", + "semicolon", + "less", + "equal", + "greater", + "question", + "at", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z", + "bracketleft", + "backslash", + "bracketright", + "asciicircum", + "underscore", + "quoteleft", + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "braceleft", + "bar", + "braceright", + "asciitilde", + "exclamdown", + "cent", + "sterling", + "fraction", + "yen", + "florin", + "section", + "currency", + "quotesingle", + "quotedblleft", + "guillemotleft", + "guilsinglleft", + "guilsinglright", + "fi", + "fl", + "endash", + "dagger", + "daggerdbl", + "periodcentered", + "paragraph", + "bullet", + "quotesinglbase", + "quotedblbase", + "quotedblright", + "guillemotright", + "ellipsis", + "perthousand", + "questiondown", + "grave", + "acute", + "circumflex", + "tilde", + "macron", + "breve", + "dotaccent", + "dieresis", + "ring", + "cedilla", + "hungarumlaut", + "ogonek", + "caron", + "emdash", + "AE", + "ordfeminine", + "Lslash", + "Oslash", + "OE", + "ordmasculine", + "ae", + "dotlessi", + "lslash", + "oslash", + "oe", + "germandbls", + "onesuperior", + "logicalnot", + "mu", + "trademark", + "Eth", + "onehalf", + "plusminus", + "Thorn", + "onequarter", + "divide", + "brokenbar", + "degree", + "thorn", + "threequarters", + "twosuperior", + "registered", + "minus", + "eth", + "multiply", + "threesuperior", + "copyright", + "Aacute", + "Acircumflex", + "Adieresis", + "Agrave", + "Aring", + "Atilde", + "Ccedilla", + "Eacute", + "Ecircumflex", + "Edieresis", + 
"Egrave", + "Iacute", + "Icircumflex", + "Idieresis", + "Igrave", + "Ntilde", + "Oacute", + "Ocircumflex", + "Odieresis", + "Ograve", + "Otilde", + "Scaron", + "Uacute", + "Ucircumflex", + "Udieresis", + "Ugrave", + "Yacute", + "Ydieresis", + "Zcaron", + "aacute", + "acircumflex", + "adieresis", + "agrave", + "aring", + "atilde", + "ccedilla", + "eacute", + "ecircumflex", + "edieresis", + "egrave", + "iacute", + "icircumflex", + "idieresis", + "igrave", + "ntilde", + "oacute", + "ocircumflex", + "odieresis", + "ograve", + "otilde", + "scaron", + "uacute", + "ucircumflex", + "udieresis", + "ugrave", + "yacute", + "ydieresis", + "zcaron", + "exclamsmall", + "Hungarumlautsmall", + "dollaroldstyle", + "dollarsuperior", + "ampersandsmall", + "Acutesmall", + "parenleftsuperior", + "parenrightsuperior", + "twodotenleader", + "onedotenleader", + "zerooldstyle", + "oneoldstyle", + "twooldstyle", + "threeoldstyle", + "fouroldstyle", + "fiveoldstyle", + "sixoldstyle", + "sevenoldstyle", + "eightoldstyle", + "nineoldstyle", + "commasuperior", + "threequartersemdash", + "periodsuperior", + "questionsmall", + "asuperior", + "bsuperior", + "centsuperior", + "dsuperior", + "esuperior", + "isuperior", + "lsuperior", + "msuperior", + "nsuperior", + "osuperior", + "rsuperior", + "ssuperior", + "tsuperior", + "ff", + "ffi", + "ffl", + "parenleftinferior", + "parenrightinferior", + "Circumflexsmall", + "hyphensuperior", + "Gravesmall", + "Asmall", + "Bsmall", + "Csmall", + "Dsmall", + "Esmall", + "Fsmall", + "Gsmall", + "Hsmall", + "Ismall", + "Jsmall", + "Ksmall", + "Lsmall", + "Msmall", + "Nsmall", + "Osmall", + "Psmall", + "Qsmall", + "Rsmall", + "Ssmall", + "Tsmall", + "Usmall", + "Vsmall", + "Wsmall", + "Xsmall", + "Ysmall", + "Zsmall", + "colonmonetary", + "onefitted", + "rupiah", + "Tildesmall", + "exclamdownsmall", + "centoldstyle", + "Lslashsmall", + "Scaronsmall", + "Zcaronsmall", + "Dieresissmall", + "Brevesmall", + "Caronsmall", + "Dotaccentsmall", + "Macronsmall", + "figuredash", + "hypheninferior", + "Ogoneksmall", + "Ringsmall", + "Cedillasmall", + "questiondownsmall", + "oneeighth", + "threeeighths", + "fiveeighths", + "seveneighths", + "onethird", + "twothirds", + "zerosuperior", + "foursuperior", + "fivesuperior", + "sixsuperior", + "sevensuperior", + "eightsuperior", + "ninesuperior", + "zeroinferior", + "oneinferior", + "twoinferior", + "threeinferior", + "fourinferior", + "fiveinferior", + "sixinferior", + "seveninferior", + "eightinferior", + "nineinferior", + "centinferior", + "dollarinferior", + "periodinferior", + "commainferior", + "Agravesmall", + "Aacutesmall", + "Acircumflexsmall", + "Atildesmall", + "Adieresissmall", + "Aringsmall", + "AEsmall", + "Ccedillasmall", + "Egravesmall", + "Eacutesmall", + "Ecircumflexsmall", + "Edieresissmall", + "Igravesmall", + "Iacutesmall", + "Icircumflexsmall", + "Idieresissmall", + "Ethsmall", + "Ntildesmall", + "Ogravesmall", + "Oacutesmall", + "Ocircumflexsmall", + "Otildesmall", + "Odieresissmall", + "OEsmall", + "Oslashsmall", + "Ugravesmall", + "Uacutesmall", + "Ucircumflexsmall", + "Udieresissmall", + "Yacutesmall", + "Thornsmall", + "Ydieresissmall", + "001.000", + "001.001", + "001.002", + "001.003", + "Black", + "Bold", + "Book", + "Light", + "Medium", + "Regular", + "Roman", + "Semibold", +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + 
+cffISOAdobeStrings = [ + ".notdef", + "space", + "exclam", + "quotedbl", + "numbersign", + "dollar", + "percent", + "ampersand", + "quoteright", + "parenleft", + "parenright", + "asterisk", + "plus", + "comma", + "hyphen", + "period", + "slash", + "zero", + "one", + "two", + "three", + "four", + "five", + "six", + "seven", + "eight", + "nine", + "colon", + "semicolon", + "less", + "equal", + "greater", + "question", + "at", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z", + "bracketleft", + "backslash", + "bracketright", + "asciicircum", + "underscore", + "quoteleft", + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "braceleft", + "bar", + "braceright", + "asciitilde", + "exclamdown", + "cent", + "sterling", + "fraction", + "yen", + "florin", + "section", + "currency", + "quotesingle", + "quotedblleft", + "guillemotleft", + "guilsinglleft", + "guilsinglright", + "fi", + "fl", + "endash", + "dagger", + "daggerdbl", + "periodcentered", + "paragraph", + "bullet", + "quotesinglbase", + "quotedblbase", + "quotedblright", + "guillemotright", + "ellipsis", + "perthousand", + "questiondown", + "grave", + "acute", + "circumflex", + "tilde", + "macron", + "breve", + "dotaccent", + "dieresis", + "ring", + "cedilla", + "hungarumlaut", + "ogonek", + "caron", + "emdash", + "AE", + "ordfeminine", + "Lslash", + "Oslash", + "OE", + "ordmasculine", + "ae", + "dotlessi", + "lslash", + "oslash", + "oe", + "germandbls", + "onesuperior", + "logicalnot", + "mu", + "trademark", + "Eth", + "onehalf", + "plusminus", + "Thorn", + "onequarter", + "divide", + "brokenbar", + "degree", + "thorn", + "threequarters", + "twosuperior", + "registered", + "minus", + "eth", + "multiply", + "threesuperior", + "copyright", + "Aacute", + "Acircumflex", + "Adieresis", + "Agrave", + "Aring", + "Atilde", + "Ccedilla", + "Eacute", + "Ecircumflex", + "Edieresis", + "Egrave", + "Iacute", + "Icircumflex", + "Idieresis", + "Igrave", + "Ntilde", + "Oacute", + "Ocircumflex", + "Odieresis", + "Ograve", + "Otilde", + "Scaron", + "Uacute", + "Ucircumflex", + "Udieresis", + "Ugrave", + "Yacute", + "Ydieresis", + "Zcaron", + "aacute", + "acircumflex", + "adieresis", + "agrave", + "aring", + "atilde", + "ccedilla", + "eacute", + "ecircumflex", + "edieresis", + "egrave", + "iacute", + "icircumflex", + "idieresis", + "igrave", + "ntilde", + "oacute", + "ocircumflex", + "odieresis", + "ograve", + "otilde", + "scaron", + "uacute", + "ucircumflex", + "udieresis", + "ugrave", + "yacute", + "ydieresis", + "zcaron", +] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [ + ".notdef", + "space", + "exclamsmall", + "Hungarumlautsmall", + "dollaroldstyle", + "dollarsuperior", + "ampersandsmall", + "Acutesmall", + "parenleftsuperior", + "parenrightsuperior", + "twodotenleader", + "onedotenleader", + "comma", + "hyphen", + "period", + "fraction", + "zerooldstyle", + "oneoldstyle", + "twooldstyle", + "threeoldstyle", + "fouroldstyle", + "fiveoldstyle", + "sixoldstyle", + "sevenoldstyle", + "eightoldstyle", + "nineoldstyle", + "colon", + "semicolon", + "commasuperior", + "threequartersemdash", + "periodsuperior", + "questionsmall", + "asuperior", + "bsuperior", + "centsuperior", + "dsuperior", + "esuperior", + "isuperior", + 
"lsuperior", + "msuperior", + "nsuperior", + "osuperior", + "rsuperior", + "ssuperior", + "tsuperior", + "ff", + "fi", + "fl", + "ffi", + "ffl", + "parenleftinferior", + "parenrightinferior", + "Circumflexsmall", + "hyphensuperior", + "Gravesmall", + "Asmall", + "Bsmall", + "Csmall", + "Dsmall", + "Esmall", + "Fsmall", + "Gsmall", + "Hsmall", + "Ismall", + "Jsmall", + "Ksmall", + "Lsmall", + "Msmall", + "Nsmall", + "Osmall", + "Psmall", + "Qsmall", + "Rsmall", + "Ssmall", + "Tsmall", + "Usmall", + "Vsmall", + "Wsmall", + "Xsmall", + "Ysmall", + "Zsmall", + "colonmonetary", + "onefitted", + "rupiah", + "Tildesmall", + "exclamdownsmall", + "centoldstyle", + "Lslashsmall", + "Scaronsmall", + "Zcaronsmall", + "Dieresissmall", + "Brevesmall", + "Caronsmall", + "Dotaccentsmall", + "Macronsmall", + "figuredash", + "hypheninferior", + "Ogoneksmall", + "Ringsmall", + "Cedillasmall", + "onequarter", + "onehalf", + "threequarters", + "questiondownsmall", + "oneeighth", + "threeeighths", + "fiveeighths", + "seveneighths", + "onethird", + "twothirds", + "zerosuperior", + "onesuperior", + "twosuperior", + "threesuperior", + "foursuperior", + "fivesuperior", + "sixsuperior", + "sevensuperior", + "eightsuperior", + "ninesuperior", + "zeroinferior", + "oneinferior", + "twoinferior", + "threeinferior", + "fourinferior", + "fiveinferior", + "sixinferior", + "seveninferior", + "eightinferior", + "nineinferior", + "centinferior", + "dollarinferior", + "periodinferior", + "commainferior", + "Agravesmall", + "Aacutesmall", + "Acircumflexsmall", + "Atildesmall", + "Adieresissmall", + "Aringsmall", + "AEsmall", + "Ccedillasmall", + "Egravesmall", + "Eacutesmall", + "Ecircumflexsmall", + "Edieresissmall", + "Igravesmall", + "Iacutesmall", + "Icircumflexsmall", + "Idieresissmall", + "Ethsmall", + "Ntildesmall", + "Ogravesmall", + "Oacutesmall", + "Ocircumflexsmall", + "Otildesmall", + "Odieresissmall", + "OEsmall", + "Oslashsmall", + "Ugravesmall", + "Uacutesmall", + "Ucircumflexsmall", + "Udieresissmall", + "Yacutesmall", + "Thornsmall", + "Ydieresissmall", +] + +cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [ + ".notdef", + "space", + "dollaroldstyle", + "dollarsuperior", + "parenleftsuperior", + "parenrightsuperior", + "twodotenleader", + "onedotenleader", + "comma", + "hyphen", + "period", + "fraction", + "zerooldstyle", + "oneoldstyle", + "twooldstyle", + "threeoldstyle", + "fouroldstyle", + "fiveoldstyle", + "sixoldstyle", + "sevenoldstyle", + "eightoldstyle", + "nineoldstyle", + "colon", + "semicolon", + "commasuperior", + "threequartersemdash", + "periodsuperior", + "asuperior", + "bsuperior", + "centsuperior", + "dsuperior", + "esuperior", + "isuperior", + "lsuperior", + "msuperior", + "nsuperior", + "osuperior", + "rsuperior", + "ssuperior", + "tsuperior", + "ff", + "fi", + "fl", + "ffi", + "ffl", + "parenleftinferior", + "parenrightinferior", + "hyphensuperior", + "colonmonetary", + "onefitted", + "rupiah", + "centoldstyle", + "figuredash", + "hypheninferior", + "onequarter", + "onehalf", + "threequarters", + "oneeighth", + "threeeighths", + "fiveeighths", + "seveneighths", + "onethird", + "twothirds", + "zerosuperior", + "onesuperior", + "twosuperior", + "threesuperior", + "foursuperior", + "fivesuperior", + "sixsuperior", + "sevensuperior", + "eightsuperior", + "ninesuperior", + "zeroinferior", + "oneinferior", + "twoinferior", + "threeinferior", + "fourinferior", + "fiveinferior", + "sixinferior", + "seveninferior", + "eightinferior", + 
"nineinferior", + "centinferior", + "dollarinferior", + "periodinferior", + "commainferior", +] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff --git a/py311/lib/python3.11/site-packages/fontTools/cffLib/specializer.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/specializer.py new file mode 100644 index 0000000000000000000000000000000000000000..974060c40ec21a9c9ede2db0a34d0e60e4b42cf9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/specializer.py @@ -0,0 +1,927 @@ +# -*- coding: utf-8 -*- + +"""T2CharString operator specializer and generalizer. + +PostScript glyph drawing operations can be expressed in multiple different +ways. For example, as well as the ``lineto`` operator, there is also a +``hlineto`` operator which draws a horizontal line, removing the need to +specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a +vertical line, removing the need to specify a ``dy`` coordinate. As well +as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects +into lists of operations, this module allows for conversion between general +and specific forms of the operation. + +""" + +from fontTools.cffLib import maxStackLimit + + +def stringToProgram(string): + if isinstance(string, str): + string = string.split() + program = [] + for token in string: + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + pass + program.append(token) + return program + + +def programToString(program): + return " ".join(str(x) for x in program) + + +def programToCommands(program, getNumRegions=None): + """Takes a T2CharString program list and returns list of commands. + Each command is a two-tuple of commandname,arg-list. The commandname might + be empty string if no commandname shall be emitted (used for glyph width, + hintmask/cntrmask argument, as well as stray arguments at the end of the + program (🤷). + 'getNumRegions' may be None, or a callable object. It must return the + number of regions. 'getNumRegions' takes a single argument, vsindex. It + returns the numRegions for the vsindex. + The Charstring may or may not start with a width value. If the first + non-blend operator has an odd number of arguments, then the first argument is + a width, and is popped off. This is complicated with blend operators, as + there may be more than one before the first hint or moveto operator, and each + one reduces several arguments to just one list argument. We have to sum the + number of arguments that are not part of the blend arguments, and all the + 'numBlends' values. We could instead have said that by definition, if there + is a blend operator, there is no width value, since CFF2 Charstrings don't + have width values. I discussed this with Behdad, and we are allowing for an + initial width value in this case because developers may assemble a CFF2 + charstring from CFF Charstrings, which could have width values. + """ + + seenWidthOp = False + vsIndex = 0 + lenBlendStack = 0 + lastBlendIndex = 0 + commands = [] + stack = [] + it = iter(program) + + for token in it: + if not isinstance(token, str): + stack.append(token) + continue + + if token == "blend": + assert getNumRegions is not None + numSourceFonts = 1 + getNumRegions(vsIndex) + # replace the blend op args on the stack with a single list + # containing all the blend op args. + numBlends = stack[-1] + numBlendArgs = numBlends * numSourceFonts + 1 + # replace first blend op by a list of the blend ops. 
+ stack[-numBlendArgs:] = [stack[-numBlendArgs:]] + lenStack = len(stack) + lenBlendStack += numBlends + lenStack - 1 + lastBlendIndex = lenStack + # if a blend op exists, this is or will be a CFF2 charstring. + continue + + elif token == "vsindex": + vsIndex = stack[-1] + assert type(vsIndex) is int + + elif (not seenWidthOp) and token in { + "hstem", + "hstemhm", + "vstem", + "vstemhm", + "cntrmask", + "hintmask", + "hmoveto", + "vmoveto", + "rmoveto", + "endchar", + }: + seenWidthOp = True + parity = token in {"hmoveto", "vmoveto"} + if lenBlendStack: + # lenBlendStack has the number of args represented by the last blend + # arg and all the preceding args. We need to now add the number of + # args following the last blend arg. + numArgs = lenBlendStack + len(stack[lastBlendIndex:]) + else: + numArgs = len(stack) + if numArgs and (numArgs % 2) ^ parity: + width = stack.pop(0) + commands.append(("", [width])) + + if token in {"hintmask", "cntrmask"}: + if stack: + commands.append(("", stack)) + commands.append((token, [])) + commands.append(("", [next(it)])) + else: + commands.append((token, stack)) + stack = [] + if stack: + commands.append(("", stack)) + return commands + + +def _flattenBlendArgs(args): + token_list = [] + for arg in args: + if isinstance(arg, list): + token_list.extend(arg) + token_list.append("blend") + else: + token_list.append(arg) + return token_list + + +def commandsToProgram(commands): + """Takes a commands list as returned by programToCommands() and converts + it back to a T2CharString program list.""" + program = [] + for op, args in commands: + if any(isinstance(arg, list) for arg in args): + args = _flattenBlendArgs(args) + program.extend(args) + if op: + program.append(op) + return program + + +def _everyN(el, n): + """Group the list el into groups of size n""" + l = len(el) + if l % n != 0: + raise ValueError(el) + for i in range(0, l, n): + yield el[i : i + n] + + +class _GeneralizerDecombinerCommandsMap(object): + @staticmethod + def rmoveto(args): + if len(args) != 2: + raise ValueError(args) + yield ("rmoveto", args) + + @staticmethod + def hmoveto(args): + if len(args) != 1: + raise ValueError(args) + yield ("rmoveto", [args[0], 0]) + + @staticmethod + def vmoveto(args): + if len(args) != 1: + raise ValueError(args) + yield ("rmoveto", [0, args[0]]) + + @staticmethod + def rlineto(args): + if not args: + raise ValueError(args) + for args in _everyN(args, 2): + yield ("rlineto", args) + + @staticmethod + def hlineto(args): + if not args: + raise ValueError(args) + it = iter(args) + try: + while True: + yield ("rlineto", [next(it), 0]) + yield ("rlineto", [0, next(it)]) + except StopIteration: + pass + + @staticmethod + def vlineto(args): + if not args: + raise ValueError(args) + it = iter(args) + try: + while True: + yield ("rlineto", [0, next(it)]) + yield ("rlineto", [next(it), 0]) + except StopIteration: + pass + + @staticmethod + def rrcurveto(args): + if not args: + raise ValueError(args) + for args in _everyN(args, 6): + yield ("rrcurveto", args) + + @staticmethod + def hhcurveto(args): + l = len(args) + if l < 4 or l % 4 > 1: + raise ValueError(args) + if l % 2 == 1: + yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0]) + args = args[5:] + for args in _everyN(args, 4): + yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0]) + + @staticmethod + def vvcurveto(args): + l = len(args) + if l < 4 or l % 4 > 1: + raise ValueError(args) + if l % 2 == 1: + yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]]) + 
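+            # (the odd leading argument was the first curve's dx1; the
+            # remaining groups of four are purely vertical curves)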
args = args[5:] + for args in _everyN(args, 4): + yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]]) + + @staticmethod + def hvcurveto(args): + l = len(args) + if l < 4 or l % 8 not in {0, 1, 4, 5}: + raise ValueError(args) + last_args = None + if l % 2 == 1: + lastStraight = l % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) + args = next(it) + yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) + else: + yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) + + @staticmethod + def vhcurveto(args): + l = len(args) + if l < 4 or l % 8 not in {0, 1, 4, 5}: + raise ValueError(args) + last_args = None + if l % 2 == 1: + lastStraight = l % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) + args = next(it) + yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) + else: + yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) + + @staticmethod + def rcurveline(args): + l = len(args) + if l < 8 or l % 6 != 2: + raise ValueError(args) + args, last_args = args[:-2], args[-2:] + for args in _everyN(args, 6): + yield ("rrcurveto", args) + yield ("rlineto", last_args) + + @staticmethod + def rlinecurve(args): + l = len(args) + if l < 8 or l % 2 != 0: + raise ValueError(args) + args, last_args = args[:-6], args[-6:] + for args in _everyN(args, 2): + yield ("rlineto", args) + yield ("rrcurveto", last_args) + + +def _convertBlendOpToArgs(blendList): + # args is list of blend op args. Since we are supporting + # recursive blend op calls, some of these args may also + # be a list of blend op args, and need to be converted before + # we convert the current list. + if any([isinstance(arg, list) for arg in blendList]): + args = [ + i + for e in blendList + for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e]) + ] + else: + args = blendList + + # We now know that blendList contains a blend op argument list, even if + # some of the args are lists that each contain a blend op argument list. + # Convert from: + # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn] + # to: + # [ [x0] + [delta tuple for x0], + # ..., + # [xn] + [delta tuple for xn] ] + numBlends = args[-1] + # Can't use args.pop() when the args are being used in a nested list + # comprehension. See calling context + args = args[:-1] + + l = len(args) + numRegions = l // numBlends - 1 + if not (numBlends * (numRegions + 1) == l): + raise ValueError(blendList) + + defaultArgs = [[arg] for arg in args[:numBlends]] + deltaArgs = args[numBlends:] + numDeltaValues = len(deltaArgs) + deltaList = [ + deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions) + ] + blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)] + return blend_args + + +def generalizeCommands(commands, ignoreErrors=False): + result = [] + mapping = _GeneralizerDecombinerCommandsMap + for op, args in commands: + # First, generalize any blend args in the arg list. 
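+        # e.g. a blend list [10, 20, 1, 2, 3, 4, 2] (two values, two regions)
+        # generalizes via _convertBlendOpToArgs to the per-value form
+        # [[10, 1, 2, 1], [20, 3, 4, 1]] (illustrative values).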
+ if any([isinstance(arg, list) for arg in args]): + try: + args = [ + n + for arg in args + for n in ( + _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg] + ) + ] + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. + result.append(("", args)) + result.append(("", [op])) + else: + raise + + func = getattr(mapping, op, None) + if func is None: + result.append((op, args)) + continue + try: + for command in func(args): + result.append(command) + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. + result.append(("", args)) + result.append(("", [op])) + else: + raise + return result + + +def generalizeProgram(program, getNumRegions=None, **kwargs): + return commandsToProgram( + generalizeCommands(programToCommands(program, getNumRegions), **kwargs) + ) + + +def _categorizeVector(v): + """ + Takes X,Y vector v and returns one of r, h, v, or 0 depending on which + of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, + it returns a single zero still. + + >>> _categorizeVector((0,0)) + ('0', (0,)) + >>> _categorizeVector((1,0)) + ('h', (1,)) + >>> _categorizeVector((0,2)) + ('v', (2,)) + >>> _categorizeVector((1,2)) + ('r', (1, 2)) + """ + if not v[0]: + if not v[1]: + return "0", v[:1] + else: + return "v", v[1:] + else: + if not v[1]: + return "h", v[:1] + else: + return "r", v + + +def _mergeCategories(a, b): + if a == "0": + return b + if b == "0": + return a + if a == b: + return a + return None + + +def _negateCategory(a): + if a == "h": + return "v" + if a == "v": + return "h" + assert a in "0r" + return a + + +def _convertToBlendCmds(args): + # return a list of blend commands, and + # the remaining non-blended args, if any. + num_args = len(args) + stack_use = 0 + new_args = [] + i = 0 + while i < num_args: + arg = args[i] + i += 1 + if not isinstance(arg, list): + new_args.append(arg) + stack_use += 1 + else: + prev_stack_use = stack_use + # The arg is a tuple of blend values. + # These are each (master 0,delta 1..delta n, 1) + # Combine as many successive tuples as we can, + # up to the max stack limit. + num_sources = len(arg) - 1 + blendlist = [arg] + stack_use += 1 + num_sources # 1 for the num_blends arg + + # if we are here, max stack is the CFF2 max stack. + # I use the CFF2 max stack limit here rather than + # the 'maxstack' chosen by the client, as the default + # maxstack may have been used unintentionally. For all + # the other operators, this just produces a little less + # optimization, but here it puts a hard (and low) limit + # on the number of source fonts that can be used. + # + # Make sure the stack depth does not exceed (maxstack - 1), so + # that subroutinizer can insert subroutine calls at any point. + while ( + (i < num_args) + and isinstance(args[i], list) + and stack_use + num_sources < maxStackLimit + ): + blendlist.append(args[i]) + i += 1 + stack_use += num_sources + # blendList now contains as many single blend tuples as can be + # combined without exceeding the CFF2 stack limit. 
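+            # e.g. the two tuples [10, 1, 2, 1] and [20, 3, 4, 1] re-combine
+            # below into the single blend argument list [10, 20, 1, 2, 3, 4, 2]
+            # (illustrative values; the trailing 2 is num_blends).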
+ num_blends = len(blendlist) + # append the 'num_blends' default font values + blend_args = [] + for arg in blendlist: + blend_args.append(arg[0]) + for arg in blendlist: + assert arg[-1] == 1 + blend_args.extend(arg[1:-1]) + blend_args.append(num_blends) + new_args.append(blend_args) + stack_use = prev_stack_use + num_blends + + return new_args + + +def _addArgs(a, b): + if isinstance(b, list): + if isinstance(a, list): + if len(a) != len(b) or a[-1] != b[-1]: + raise ValueError() + return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]] + else: + a, b = b, a + if isinstance(a, list): + assert a[-1] == 1 + return [_addArgs(a[0], b)] + a[1:] + return a + b + + +def _argsStackUse(args): + stackLen = 0 + maxLen = 0 + for arg in args: + if type(arg) is list: + # Blended arg + maxLen = max(maxLen, stackLen + _argsStackUse(arg)) + stackLen += arg[-1] + else: + stackLen += 1 + return max(stackLen, maxLen) + + +def specializeCommands( + commands, + ignoreErrors=False, + generalizeFirst=True, + preserveTopology=False, + maxstack=48, +): + # We perform several rounds of optimizations. They are carefully ordered and are: + # + # 0. Generalize commands. + # This ensures that they are in our expected simple form, with each line/curve only + # having arguments for one segment, and using the generic form (rlineto/rrcurveto). + # If caller is sure the input is in this form, they can turn off generalization to + # save time. + # + # 1. Combine successive rmoveto operations. + # + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # We specialize into some, made-up, variants as well, which simplifies following + # passes. + # + # 3. Merge or delete redundant operations, to the extent requested. + # OpenType spec declares point numbers in CFF undefined. As such, we happily + # change topology. If client relies on point numbers (in GPOS anchors, or for + # hinting purposes(what?)) they can turn this off. + # + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + # + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + # + # 6. Resolve any remaining made-up operators into real operators. + # + # I have convinced myself that this produces optimal bytecode (except for, possibly + # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) + # A dynamic-programming approach can do the same but would be significantly slower. + # + # 7. For any args which are blend lists, convert them to a blend command. + + # 0. Generalize commands. + if generalizeFirst: + commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) + else: + commands = list(commands) # Make copy since we modify in-place later. + + # 1. Combine successive rmoveto operations. + for i in range(len(commands) - 1, 0, -1): + if "rmoveto" == commands[i][0] == commands[i - 1][0]: + v1, v2 = commands[i - 1][1], commands[i][1] + commands[i - 1] = ( + "rmoveto", + [_addArgs(v1[0], v2[0]), _addArgs(v1[1], v2[1])], + ) + del commands[i] + + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # + # We, in fact, specialize into more, made-up, variants that special-case when both + # X and Y components are zero. This simplifies the following optimization passes. + # This case is rare, but OCD does not let me skip it. 
+ # + # After this round, we will have four variants that use the following mnemonics: + # + # - 'r' for relative, ie. non-zero X and non-zero Y, + # - 'h' for horizontal, ie. zero X and non-zero Y, + # - 'v' for vertical, ie. non-zero X and zero Y, + # - '0' for zeros, ie. zero X and zero Y. + # + # The '0' pseudo-operators are not part of the spec, but help simplify the following + # optimization rounds. We resolve them at the end. So, after this, we will have four + # moveto and four lineto variants: + # + # - 0moveto, 0lineto + # - hmoveto, hlineto + # - vmoveto, vlineto + # - rmoveto, rlineto + # + # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve + # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. + # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. + # + # There are nine different variants of curves without the '0'. Those nine map exactly + # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, + # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of + # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of + # arguments) is in fact an rhcurveto. The operators in the spec are designed such that + # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. + # + # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest + # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be + # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always + # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute + # the '0' with either 'h' or 'v' and it works. + # + # When we get to curve splines however, things become more complicated... XXX finish this. + # There's one more complexity with splines. If one side of the spline is not horizontal or + # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. + # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and + # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. + # This limits our merge opportunities later. + # + for i in range(len(commands)): + op, args = commands[i] + + if op in {"rmoveto", "rlineto"}: + c, args = _categorizeVector(args) + commands[i] = c + op[1:], args + continue + + if op == "rrcurveto": + c1, args1 = _categorizeVector(args[:2]) + c2, args2 = _categorizeVector(args[-2:]) + commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2 + continue + + # 3. Merge or delete redundant operations, to the extent requested. + # + # TODO + # A 0moveto that comes before all other path operations can be removed. + # though I find conflicting evidence for this. + # + # TODO + # "If hstem and vstem hints are both declared at the beginning of a + # CharString, and this sequence is followed directly by the hintmask or + # cntrmask operators, then the vstem hint operator (or, if applicable, + # the vstemhm operator) need not be included." + # + # "The sequence and form of a CFF2 CharString program may be represented as: + # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" + # + # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 + # + # For Type2 CharStrings the sequence is: + # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" + + # Some other redundancies change topology (point numbers). 
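+    # e.g. adjacent "hlineto [3]" and "hlineto [4]" commands merge into one
+    # "hlineto [7]", and zero-length "0lineto" segments are deleted outright
+    # (illustrative values).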
+ if not preserveTopology: + for i in range(len(commands) - 1, -1, -1): + op, args = commands[i] + + # A 00curveto is demoted to a (specialized) lineto. + if op == "00curveto": + assert len(args) == 4 + c, args = _categorizeVector(args[1:3]) + op = c + "lineto" + commands[i] = op, args + # and then... + + # A 0lineto can be deleted. + if op == "0lineto": + del commands[i] + continue + + # Merge adjacent hlineto's and vlineto's. + # In CFF2 charstrings from variable fonts, each + # arg item may be a list of blendable values, one from + # each source font. + if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]): + _, other_args = commands[i - 1] + assert len(args) == 1 and len(other_args) == 1 + try: + new_args = [_addArgs(args[0], other_args[0])] + except ValueError: + continue + commands[i - 1] = (op, new_args) + del commands[i] + continue + + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + for i in range(1, len(commands) - 1): + op, args = commands[i] + prv, nxt = commands[i - 1][0], commands[i + 1][0] + + if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto": + assert len(args) == 1 + args = [0, args[0]] if op[0] == "v" else [args[0], 0] + commands[i] = ("rlineto", args) + continue + + if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto": + assert (op[0] == "r") ^ (op[1] == "r") + if op[0] == "v": + pos = 0 + elif op[0] != "r": + pos = 1 + elif op[1] == "v": + pos = 4 + else: + pos = 5 + # Insert, while maintaining the type of args (can be tuple or list). + args = args[:pos] + type(args)((0,)) + args[pos:] + commands[i] = ("rrcurveto", args) + continue + + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + stackUse = _argsStackUse(commands[-1][1]) if commands else 0 + for i in range(len(commands) - 1, 0, -1): + op1, args1 = commands[i - 1] + op2, args2 = commands[i] + new_op = None + + # Merge logic... + if {op1, op2} <= {"rlineto", "rrcurveto"}: + if op1 == op2: + new_op = op1 + else: + l = len(args2) + if op2 == "rrcurveto" and l == 6: + new_op = "rlinecurve" + elif l == 2: + new_op = "rcurveline" + + elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}: + new_op = op2 + + elif {op1, op2} == {"vlineto", "hlineto"}: + new_op = op1 + + elif "curveto" == op1[2:] == op2[2:]: + d0, d1 = op1[:2] + d2, d3 = op2[:2] + + if d1 == "r" or d2 == "r" or d0 == d3 == "r": + continue + + d = _mergeCategories(d1, d2) + if d is None: + continue + if d0 == "r": + d = _mergeCategories(d, d3) + if d is None: + continue + new_op = "r" + d + "curveto" + elif d3 == "r": + d0 = _mergeCategories(d0, _negateCategory(d)) + if d0 is None: + continue + new_op = d0 + "r" + "curveto" + else: + d0 = _mergeCategories(d0, d3) + if d0 is None: + continue + new_op = d0 + d + "curveto" + + # Make sure the stack depth does not exceed (maxstack - 1), so + # that subroutinizer can insert subroutine calls at any point. + args1StackUse = _argsStackUse(args1) + combinedStackUse = max(args1StackUse, len(args1) + stackUse) + if new_op and combinedStackUse < maxstack: + commands[i - 1] = (new_op, args1 + args2) + del commands[i] + stackUse = combinedStackUse + else: + stackUse = args1StackUse + + # 6. Resolve any remaining made-up operators into real operators. 
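+    # e.g. "0moveto" resolves to "hmoveto" (its literal zero argument is
+    # already in place), and a made-up "0hcurveto" becomes the real
+    # "hhcurveto" operator (illustrative).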
+ for i in range(len(commands)): + op, args = commands[i] + + if op in {"0moveto", "0lineto"}: + commands[i] = "h" + op[1:], args + continue + + if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}: + l = len(args) + + op0, op1 = op[:2] + if (op0 == "r") ^ (op1 == "r"): + assert l % 2 == 1 + if op0 == "0": + op0 = "h" + if op1 == "0": + op1 = "h" + if op0 == "r": + op0 = op1 + if op1 == "r": + op1 = _negateCategory(op0) + assert {op0, op1} <= {"h", "v"}, (op0, op1) + + if l % 2: + if op0 != op1: # vhcurveto / hvcurveto + if (op0 == "h") ^ (l % 8 == 1): + # Swap last two args order + args = args[:-2] + args[-1:] + args[-2:-1] + else: # hhcurveto / vvcurveto + if op0 == "h": # hhcurveto + # Swap first two args order + args = args[1:2] + args[:1] + args[2:] + + commands[i] = op0 + op1 + "curveto", args + continue + + # 7. For any series of args which are blend lists, convert the series to a single blend arg. + for i in range(len(commands)): + op, args = commands[i] + if any(isinstance(arg, list) for arg in args): + commands[i] = op, _convertToBlendCmds(args) + + return commands + + +def specializeProgram(program, getNumRegions=None, **kwargs): + return commandsToProgram( + specializeCommands(programToCommands(program, getNumRegions), **kwargs) + ) + + +if __name__ == "__main__": + import sys + + if len(sys.argv) == 1: + import doctest + + sys.exit(doctest.testmod().failed) + + import argparse + + parser = argparse.ArgumentParser( + "fonttools cffLib.specializer", + description="CFF CharString generalizer/specializer", + ) + parser.add_argument("program", metavar="command", nargs="*", help="Commands.") + parser.add_argument( + "--num-regions", + metavar="NumRegions", + nargs="*", + default=None, + help="Number of variable-font regions for blend opertaions.", + ) + parser.add_argument( + "--font", + metavar="FONTFILE", + default=None, + help="CFF2 font to specialize.", + ) + parser.add_argument( + "-o", + "--output-file", + type=str, + help="Output font file name.", + ) + + options = parser.parse_args(sys.argv[1:]) + + if options.program: + getNumRegions = ( + None + if options.num_regions is None + else lambda vsIndex: int( + options.num_regions[0 if vsIndex is None else vsIndex] + ) + ) + + program = stringToProgram(options.program) + print("Program:") + print(programToString(program)) + commands = programToCommands(program, getNumRegions) + print("Commands:") + print(commands) + program2 = commandsToProgram(commands) + print("Program from commands:") + print(programToString(program2)) + assert program == program2 + print("Generalized program:") + print(programToString(generalizeProgram(program, getNumRegions))) + print("Specialized program:") + print(programToString(specializeProgram(program, getNumRegions))) + + if options.font: + from fontTools.ttLib import TTFont + + font = TTFont(options.font) + cff2 = font["CFF2"].cff.topDictIndex[0] + charstrings = cff2.CharStrings + for glyphName in charstrings.keys(): + charstring = charstrings[glyphName] + charstring.decompile() + getNumRegions = charstring.private.getNumRegions + charstring.program = specializeProgram( + charstring.program, getNumRegions, maxstack=maxStackLimit + ) + + if options.output_file is None: + from fontTools.misc.cliTools import makeOutputFileName + + outfile = makeOutputFileName( + options.font, overWrite=True, suffix=".specialized" + ) + else: + outfile = options.output_file + if outfile: + print("Saving", outfile) + font.save(outfile) diff --git 
a/py311/lib/python3.11/site-packages/fontTools/cffLib/transforms.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..b9b7c86c8b450d8a599780b1af53004afa9004d7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/transforms.py @@ -0,0 +1,495 @@ +from fontTools.misc.psCharStrings import ( + SimpleT2Decompiler, + T2WidthExtractor, + calcSubrBias, +) + + +def _uniq_sort(l): + return sorted(set(l)) + + +class StopHintCountEvent(Exception): + pass + + +class _DesubroutinizingT2Decompiler(SimpleT2Decompiler): + stop_hintcount_ops = ( + "op_hintmask", + "op_cntrmask", + "op_rmoveto", + "op_hmoveto", + "op_vmoveto", + ) + + def __init__(self, localSubrs, globalSubrs, private=None): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private) + + def execute(self, charString): + self.need_hintcount = True # until proven otherwise + for op_name in self.stop_hintcount_ops: + setattr(self, op_name, self.stop_hint_count) + + if hasattr(charString, "_desubroutinized"): + # If a charstring has already been desubroutinized, we will still + # need to execute it if we need to count hints in order to + # compute the byte length for mask arguments, and haven't finished + # counting hints pairs. + if self.need_hintcount and self.callingStack: + try: + SimpleT2Decompiler.execute(self, charString) + except StopHintCountEvent: + del self.callingStack[-1] + return + + charString._patches = [] + SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx, expansion in reversed(charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in [ + "callsubr", + "callgsubr", + ], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == "return": + expansion = expansion[:-1] + desubroutinized[idx - 2 : idx] = expansion + if not self.private.in_cff2: + if "endchar" in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[ + : desubroutinized.index("endchar") + 1 + ] + + charString._desubroutinized = desubroutinized + del charString._patches + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1] + self.localBias] + SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] + SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def stop_hint_count(self, *args): + self.need_hintcount = False + for op_name in self.stop_hintcount_ops: + setattr(self, op_name, None) + cs = self.callingStack[-1] + if hasattr(cs, "_desubroutinized"): + raise StopHintCountEvent() + + def op_hintmask(self, index): + SimpleT2Decompiler.op_hintmask(self, index) + if self.need_hintcount: + self.stop_hint_count() + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + if not hasattr(cs, "_desubroutinized"): + cs._patches.append((index, subr._desubroutinized)) + + +def desubroutinizeCharString(cs): + """Desubroutinize a charstring in-place.""" + cs.decompile() + subrs = getattr(cs.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, cs.globalSubrs, cs.private) + decompiler.execute(cs) + cs.program = cs._desubroutinized + del cs._desubroutinized + + +def desubroutinize(cff): + for fontName in cff.fontNames: + font = cff[fontName] + cs = font.CharStrings + for c in cs.values(): + desubroutinizeCharString(c) + # Delete all 
the local subrs + if hasattr(font, "FDArray"): + for fd in font.FDArray: + pd = fd.Private + if hasattr(pd, "Subrs"): + del pd.Subrs + if "Subrs" in pd.rawDict: + del pd.rawDict["Subrs"] + else: + pd = font.Private + if hasattr(pd, "Subrs"): + del pd.Subrs + if "Subrs" in pd.rawDict: + del pd.rawDict["Subrs"] + # as well as the global subrs + cff.GlobalSubrs.clear() + + +class _MarkingT2Decompiler(SimpleT2Decompiler): + def __init__(self, localSubrs, globalSubrs, private): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private) + for subrs in [localSubrs, globalSubrs]: + if subrs and not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1] + self.localBias) + SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias) + SimpleT2Decompiler.op_callgsubr(self, index) + + +class _DehintingT2Decompiler(T2WidthExtractor): + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + # Note that if a charstring starts with hintmask, it will + # have has_hint set to True, because it *might* produce an + # implicit vstem if called under certain conditions. + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this, or there might be + # other things. Not clear yet. + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + # List of indices of calls to empty subroutines to remove. + self.deletions = [] + + pass + + def __init__( + self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None + ): + self._css = css + T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX + ) + self.private = private + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, "_hints") else None + charString._hints = self.Hints() + + T2WidthExtractor.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
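+            # i.e. if any operator (string token) appears after the last
+            # checked position, drawing has started and no further hints can
+            # follow this charstring.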
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1] + self.localBias] + T2WidthExtractor.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] + T2WidthExtractor.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + T2WidthExtractor.op_hstem(self, index) + self.processHint(index) + + def op_vstem(self, index): + T2WidthExtractor.op_vstem(self, index) + self.processHint(index) + + def op_hstemhm(self, index): + T2WidthExtractor.op_hstemhm(self, index) + self.processHint(index) + + def op_vstemhm(self, index): + T2WidthExtractor.op_vstemhm(self, index) + self.processHint(index) + + def op_hintmask(self, index): + rv = T2WidthExtractor.op_hintmask(self, index) + self.processHintmask(index) + return rv + + def op_cntrmask(self, index): + rv = T2WidthExtractor.op_cntrmask(self, index) + self.processHintmask(index) + return rv + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + else: + # We are an implicit vstem + hints.has_hint = True + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + # Check from last_check, make sure we didn't have + # any operators. + if hints.status != 2: + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + + if hints.status != 2: + if subr_hints.has_hint: + hints.has_hint = True + + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + + elif subr_hints.status == 0: + hints.deletions.append(index) + + hints.status = max(hints.status, subr_hints.status) + + +def _cs_subset_subroutines(charstring, subrs, gsubrs): + p = charstring.program + for i in range(1, len(p)): + if p[i] == "callsubr": + assert isinstance(p[i - 1], int) + p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias + elif p[i] == "callgsubr": + assert isinstance(p[i - 1], int) + p[i - 1] = ( + gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias + ) + + +def _cs_drop_hints(charstring): + hints = charstring._hints + + if hints.deletions: + p = charstring.program + for idx in reversed(hints.deletions): + del p[idx - 2 : idx] + + if hints.has_hint: + assert not hints.deletions or hints.last_hint <= hints.deletions[0] + charstring.program = charstring.program[hints.last_hint :] + if not charstring.program: + # TODO CFF2 no need for endchar. 
+ charstring.program.append("endchar") + if hasattr(charstring, "width"): + # Insert width back if needed + if charstring.width != charstring.private.defaultWidthX: + # For CFF2 charstrings, this should never happen + assert ( + charstring.private.defaultWidthX is not None + ), "CFF2 CharStrings must not have an initial width value" + charstring.program.insert( + 0, charstring.width - charstring.private.nominalWidthX + ) + + if hints.has_hintmask: + i = 0 + p = charstring.program + while i < len(p): + if p[i] in ["hintmask", "cntrmask"]: + assert i + 1 <= len(p) + del p[i : i + 2] + continue + i += 1 + + assert len(charstring.program) + + del charstring._hints + + +def remove_hints(cff, *, removeUnusedSubrs: bool = True): + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... Good luck understanding that :( + css = set() + for c in cs.values(): + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler( + css, + subrs, + c.globalSubrs, + c.private.nominalWidthX, + c.private.defaultWidthX, + c.private, + ) + decompiler.execute(c) + c.width = decompiler.width + for charstring in css: + _cs_drop_hints(charstring) + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, "FDArray"): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in [ + "BlueValues", + "OtherBlues", + "FamilyBlues", + "FamilyOtherBlues", + "BlueScale", + "BlueShift", + "BlueFuzz", + "StemSnapH", + "StemSnapV", + "StdHW", + "StdVW", + "ForceBold", + "LanguageGroup", + "ExpansionFactor", + ]: + if hasattr(priv, k): + setattr(priv, k, None) + if removeUnusedSubrs: + remove_unused_subroutines(cff) + + +def _pd_delete_empty_subrs(private_dict): + if hasattr(private_dict, "Subrs") and not private_dict.Subrs: + if "Subrs" in private_dict.rawDict: + del private_dict.rawDict["Subrs"] + del private_dict.Subrs + + +def remove_unused_subroutines(cff): + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for c in cs.values(): + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, "FDArray"): + all_subrs.extend( + fd.Private.Subrs + for fd in font.FDArray + if hasattr(fd.Private, "Subrs") and fd.Private.Subrs + ) + elif hasattr(font.Private, "Subrs") and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, "_used"): + subrs._used = set() 
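+        # e.g. a subr INDEX shrinking from 1500 to 100 entries moves from a
+        # bias of 1131 to 107 (see calcSubrBias), so both old and new biases
+        # are recorded to renumber callsubr/callgsubr operands (illustrative
+        # values).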
+ subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = calcSubrBias(subrs) + subrs._new_bias = calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for c in cs.values(): + subrs = getattr(c.private, "Subrs", None) + _cs_subset_subroutines(c, subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"): + local_subrs = font.Private.Subrs + elif ( + hasattr(font, "FDArray") + and len(font.FDArray) == 1 + and hasattr(font.FDArray[0].Private, "Subrs") + ): + # Technically we shouldn't do this. But I've run into fonts that do it. + local_subrs = font.FDArray[0].Private.Subrs + else: + local_subrs = None + else: + local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + if hasattr(subrs, "file"): + del subrs.file + if hasattr(subrs, "offsets"): + del subrs.offsets + + for subr in subrs.items: + _cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs) + + # Delete local SubrsIndex if empty + if hasattr(font, "FDArray"): + for fd in font.FDArray: + _pd_delete_empty_subrs(fd.Private) + else: + _pd_delete_empty_subrs(font.Private) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias diff --git a/py311/lib/python3.11/site-packages/fontTools/cffLib/width.py b/py311/lib/python3.11/site-packages/fontTools/cffLib/width.py new file mode 100644 index 0000000000000000000000000000000000000000..78ff27e4fd85636141cddac2ee8035c0d7e33e03 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cffLib/width.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- + +"""T2CharString glyph width optimizer. + +CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX`` +value do not need to specify their width in their charstring, saving bytes. +This module determines the optimum ``defaultWidthX`` and ``nominalWidthX`` +values for a font, when provided with a list of glyph widths.""" + +from fontTools.ttLib import TTFont +from collections import defaultdict +from operator import add +from functools import reduce + + +__all__ = ["optimizeWidths", "main"] + + +class missingdict(dict): + def __init__(self, missing_func): + self.missing_func = missing_func + + def __missing__(self, v): + return self.missing_func(v) + + +def cumSum(f, op=add, start=0, decreasing=False): + keys = sorted(f.keys()) + minx, maxx = keys[0], keys[-1] + + total = reduce(op, f.values(), start) + + if decreasing: + missing = lambda x: start if x > maxx else total + domain = range(maxx, minx - 1, -1) + else: + missing = lambda x: start if x < minx else total + domain = range(minx, maxx + 1) + + out = missingdict(missing) + + v = start + for x in domain: + v = op(v, f[x]) + out[x] = v + + return out + + +def byteCost(widths, default, nominal): + if not hasattr(widths, "items"): + d = defaultdict(int) + for w in widths: + d[w] += 1 + widths = d + + cost = 0 + for w, freq in widths.items(): + if w == default: + continue + diff = abs(w - nominal) + if diff <= 107: + cost += freq + elif diff <= 1131: + cost += freq * 2 + else: + cost += freq * 5 + return cost + + +def optimizeWidthsBruteforce(widths): + """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. 
Only works for the smallest of fonts."""
+
+    d = defaultdict(int)
+    for w in widths:
+        d[w] += 1
+
+    # Maximum number of bytes using default can possibly save
+    maxDefaultAdvantage = 5 * max(d.values())
+
+    minw, maxw = min(widths), max(widths)
+    domain = list(range(minw, maxw + 1))
+
+    bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
+
+    bestCost = len(widths) * 5 + 1
+    for nominal in domain:
+        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
+            continue
+        for default in domain:
+            cost = byteCost(widths, default, nominal)
+            if cost < bestCost:
+                bestCost = cost
+                bestDefault = default
+                bestNominal = nominal
+
+    return bestDefault, bestNominal
+
+
+def optimizeWidths(widths):
+    """Given a list of glyph widths, or a dictionary mapping glyph width to the
+    number of glyphs having that width, returns a tuple of the best CFF default
+    and nominal glyph widths.
+
+    This algorithm is linear in UPEM+numGlyphs."""
+
+    if not hasattr(widths, "items"):
+        d = defaultdict(int)
+        for w in widths:
+            d[w] += 1
+        widths = d
+
+    keys = sorted(widths.keys())
+    minw, maxw = keys[0], keys[-1]
+    domain = list(range(minw, maxw + 1))
+
+    # Cumulative sum/max forward/backward.
+    cumFrqU = cumSum(widths, op=add)
+    cumMaxU = cumSum(widths, op=max)
+    cumFrqD = cumSum(widths, op=add, decreasing=True)
+    cumMaxD = cumSum(widths, op=max, decreasing=True)
+
+    # Cost per nominal choice, without default consideration.
+    nomnCostU = missingdict(
+        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
+    )
+    nomnCostD = missingdict(
+        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
+    )
+    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
+
+    # Cost-saving per nominal choice, by best default choice.
+    dfltCostU = missingdict(
+        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
+    )
+    dfltCostD = missingdict(
+        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
+    )
+    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
+
+    # Combined cost per nominal choice.
+    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
+
+    # Best nominal.
+    nominal = min(domain, key=lambda x: bestCost[x])
+
+    # Work back the best default.
+ bestC = bestCost[nominal] + dfltC = nomnCost[nominal] - bestCost[nominal] + ends = [] + if dfltC == dfltCostU[nominal]: + starts = [nominal, nominal - 108, nominal - 1132] + for start in starts: + while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]: + start -= 1 + ends.append(start) + else: + starts = [nominal, nominal + 108, nominal + 1132] + for start in starts: + while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]: + start += 1 + ends.append(start) + default = min(ends, key=lambda default: byteCost(widths, default, nominal)) + + return default, nominal + + +def main(args=None): + """Calculate optimum defaultWidthX/nominalWidthX values""" + + import argparse + + parser = argparse.ArgumentParser( + "fonttools cffLib.width", + description=main.__doc__, + ) + parser.add_argument( + "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files" + ) + parser.add_argument( + "-b", + "--brute-force", + dest="brute", + action="store_true", + help="Use brute-force approach (VERY slow)", + ) + + args = parser.parse_args(args) + + for fontfile in args.inputs: + font = TTFont(fontfile) + hmtx = font["hmtx"] + widths = [m[0] for m in hmtx.metrics.values()] + if args.brute: + default, nominal = optimizeWidthsBruteforce(widths) + else: + default, nominal = optimizeWidths(widths) + print( + "glyphs=%d default=%d nominal=%d byteCost=%d" + % (len(widths), default, nominal, byteCost(widths, default, nominal)) + ) + + +if __name__ == "__main__": + import sys + + if len(sys.argv) == 1: + import doctest + + sys.exit(doctest.testmod().failed) + main() diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/__init__.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/builder.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..6e45e7a885003764c7092a377abb87f73bca1008 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/colorLib/builder.py @@ -0,0 +1,664 @@ +""" +colorLib.builder: Build COLR/CPAL tables from scratch + +""" + +import collections +import copy +import enum +from functools import partial +from math import ceil, log +from typing import ( + Any, + Dict, + Generator, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) +from fontTools.misc.arrayTools import intRect +from fontTools.misc.fixedTools import fixedToFloat +from fontTools.misc.treeTools import build_n_ary_tree +from fontTools.ttLib.tables import C_O_L_R_ +from fontTools.ttLib.tables import C_P_A_L_ +from fontTools.ttLib.tables import _n_a_m_e +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode +from .errors import ColorLibError +from .geometry import round_start_circle_stable_containment +from .table_builder import BuildCallback, TableBuilder + + +# TODO move type aliases to colorLib.types? 
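The cost model in byteCost mirrors the T2 CharString number encoding: a width delta within ±107 of nominalWidthX costs one byte, within ±1131 two bytes, and anything farther five. A minimal usage sketch for the cffLib.width optimizer above, with a hypothetical list of advance widths:

    from fontTools.cffLib.width import byteCost, optimizeWidths

    # Hypothetical advance widths, e.g. collected from an hmtx table.
    widths = [500] * 800 + [600] * 150 + [250] * 50

    default, nominal = optimizeWidths(widths)
    # Glyphs whose width equals `default` omit the width from their charstring;
    # all others encode (width - nominal), cheapest when the delta is <= 107.
    print(default, nominal, byteCost(widths, default, nominal))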
+T = TypeVar("T") +_Kwargs = Mapping[str, Any] +_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]] +_PaintInputList = Sequence[_PaintInput] +_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]] +_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]] +_ClipBoxInput = Union[ + Tuple[int, int, int, int, int], # format 1, variable + Tuple[int, int, int, int], # format 0, non-variable + ot.ClipBox, +] + + +MAX_PAINT_COLR_LAYER_COUNT = 255 +_DEFAULT_ALPHA = 1.0 +_MAX_REUSE_LEN = 32 + + +def _beforeBuildPaintRadialGradient(paint, source): + x0 = source["x0"] + y0 = source["y0"] + r0 = source["r0"] + x1 = source["x1"] + y1 = source["y1"] + r1 = source["r1"] + + # TODO apparently no builder_test confirms this works (?) + + # avoid abrupt change after rounding when c0 is near c1's perimeter + c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1) + x0, y0 = c.centre + r0 = c.radius + + # update source to ensure paint is built with corrected values + source["x0"] = x0 + source["y0"] = y0 + source["r0"] = r0 + source["x1"] = x1 + source["y1"] = y1 + source["r1"] = r1 + + return paint, source + + +def _defaultColorStop(): + colorStop = ot.ColorStop() + colorStop.Alpha = _DEFAULT_ALPHA + return colorStop + + +def _defaultVarColorStop(): + colorStop = ot.VarColorStop() + colorStop.Alpha = _DEFAULT_ALPHA + return colorStop + + +def _defaultColorLine(): + colorLine = ot.ColorLine() + colorLine.Extend = ExtendMode.PAD + return colorLine + + +def _defaultVarColorLine(): + colorLine = ot.VarColorLine() + colorLine.Extend = ExtendMode.PAD + return colorLine + + +def _defaultPaintSolid(): + paint = ot.Paint() + paint.Alpha = _DEFAULT_ALPHA + return paint + + +def _buildPaintCallbacks(): + return { + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + ot.PaintFormat.PaintRadialGradient, + ): _beforeBuildPaintRadialGradient, + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + ot.PaintFormat.PaintVarRadialGradient, + ): _beforeBuildPaintRadialGradient, + (BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop, + (BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop, + (BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine, + (BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine, + ( + BuildCallback.CREATE_DEFAULT, + ot.Paint, + ot.PaintFormat.PaintSolid, + ): _defaultPaintSolid, + ( + BuildCallback.CREATE_DEFAULT, + ot.Paint, + ot.PaintFormat.PaintVarSolid, + ): _defaultPaintSolid, + } + + +def populateCOLRv0( + table: ot.COLR, + colorGlyphsV0: _ColorGlyphsV0Dict, + glyphMap: Optional[Mapping[str, int]] = None, +): + """Build v0 color layers and add to existing COLR table. + + Args: + table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``). + colorGlyphsV0: map of base glyph names to lists of (layer glyph names, + color palette index) tuples. Can be empty. + glyphMap: a map from glyph names to glyph indices, as returned from + ``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID. 
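+
+    Example (hypothetical glyph and layer names):
+
+        colr = ot.COLR()
+        populateCOLRv0(colr, {"A": [("A.color0", 0), ("A.color1", 1)]})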
+ """ + if glyphMap is not None: + colorGlyphItems = sorted( + colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]] + ) + else: + colorGlyphItems = colorGlyphsV0.items() + baseGlyphRecords = [] + layerRecords = [] + for baseGlyph, layers in colorGlyphItems: + baseRec = ot.BaseGlyphRecord() + baseRec.BaseGlyph = baseGlyph + baseRec.FirstLayerIndex = len(layerRecords) + baseRec.NumLayers = len(layers) + baseGlyphRecords.append(baseRec) + + for layerGlyph, paletteIndex in layers: + layerRec = ot.LayerRecord() + layerRec.LayerGlyph = layerGlyph + layerRec.PaletteIndex = paletteIndex + layerRecords.append(layerRec) + + table.BaseGlyphRecordArray = table.LayerRecordArray = None + if baseGlyphRecords: + table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray() + table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords + if layerRecords: + table.LayerRecordArray = ot.LayerRecordArray() + table.LayerRecordArray.LayerRecord = layerRecords + table.BaseGlyphRecordCount = len(baseGlyphRecords) + table.LayerRecordCount = len(layerRecords) + + +def buildCOLR( + colorGlyphs: _ColorGlyphsDict, + version: Optional[int] = None, + *, + glyphMap: Optional[Mapping[str, int]] = None, + varStore: Optional[ot.VarStore] = None, + varIndexMap: Optional[ot.DeltaSetIndexMap] = None, + clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None, + allowLayerReuse: bool = True, +) -> C_O_L_R_.table_C_O_L_R_: + """Build COLR table from color layers mapping. + + Args: + + colorGlyphs: map of base glyph name to, either list of (layer glyph name, + color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or + list of ``Paint`` for COLRv1. + version: the version of COLR table. If None, the version is determined + by the presence of COLRv1 paints or variation data (varStore), which + require version 1; otherwise, if all base glyphs use only simple color + layers, version 0 is used. + glyphMap: a map from glyph names to glyph indices, as returned from + TTFont.getReverseGlyphMap(), to optionally sort base records by GID. + varStore: Optional ItemVarationStore for deltas associated with v1 layer. + varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer. + clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples: + (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase). + + Returns: + A new COLR table. 
+ """ + self = C_O_L_R_.table_C_O_L_R_() + + if varStore is not None and version == 0: + raise ValueError("Can't add VarStore to COLRv0") + + if version in (None, 0) and not varStore: + # split color glyphs into v0 and v1 and encode separately + colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs) + if version == 0 and colorGlyphsV1: + raise ValueError("Can't encode COLRv1 glyphs in COLRv0") + else: + # unless explicitly requested for v1 or have variations, in which case + # we encode all color glyph as v1 + colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs + + colr = ot.COLR() + + populateCOLRv0(colr, colorGlyphsV0, glyphMap) + + colr.LayerList, colr.BaseGlyphList = buildColrV1( + colorGlyphsV1, + glyphMap, + allowLayerReuse=allowLayerReuse, + ) + + if version is None: + version = 1 if (varStore or colorGlyphsV1) else 0 + elif version not in (0, 1): + raise NotImplementedError(version) + self.version = colr.Version = version + + if version == 0: + self.ColorLayers = self._decompileColorLayersV0(colr) + else: + colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None + colr.VarIndexMap = varIndexMap + colr.VarStore = varStore + self.table = colr + + return self + + +def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList: + clipList = ot.ClipList() + clipList.Format = 1 + clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()} + return clipList + + +def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox: + if isinstance(clipBox, ot.ClipBox): + return clipBox + n = len(clipBox) + clip = ot.ClipBox() + if n not in (4, 5): + raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}") + clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4]) + clip.Format = int(n == 5) + 1 + if n == 5: + clip.VarIndexBase = int(clipBox[4]) + return clip + + +class ColorPaletteType(enum.IntFlag): + USABLE_WITH_LIGHT_BACKGROUND = 0x0001 + USABLE_WITH_DARK_BACKGROUND = 0x0002 + + @classmethod + def _missing_(cls, value): + # enforce reserved bits + if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0): + raise ValueError(f"{value} is not a valid {cls.__name__}") + return super()._missing_(value) + + +# None, 'abc' or {'en': 'abc', 'de': 'xyz'} +_OptionalLocalizedString = Union[None, str, Dict[str, str]] + + +def buildPaletteLabels( + labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e +) -> List[Optional[int]]: + return [ + ( + nameTable.addMultilingualName(l, mac=False) + if isinstance(l, dict) + else ( + C_P_A_L_.table_C_P_A_L_.NO_NAME_ID + if l is None + else nameTable.addMultilingualName({"en": l}, mac=False) + ) + ) + for l in labels + ] + + +def buildCPAL( + palettes: Sequence[Sequence[Tuple[float, float, float, float]]], + paletteTypes: Optional[Sequence[ColorPaletteType]] = None, + paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None, + paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None, + nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None, +) -> C_P_A_L_.table_C_P_A_L_: + """Build CPAL table from list of color palettes. + + Args: + palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats + in the range [0..1]. + paletteTypes: optional list of ColorPaletteType, one for each palette. + paletteLabels: optional list of palette labels. Each lable can be either: + None (no label), a string (for for default English labels), or a + localized string (as a dict keyed with BCP47 language codes). 
+ paletteEntryLabels: optional list of palette entry labels, one for each + palette entry (see paletteLabels). + nameTable: optional name table where to store palette and palette entry + labels. Required if either paletteLabels or paletteEntryLabels is set. + + Return: + A new CPAL v0 or v1 table, if custom palette types or labels are specified. + """ + if len({len(p) for p in palettes}) != 1: + raise ColorLibError("color palettes have different lengths") + + if (paletteLabels or paletteEntryLabels) and not nameTable: + raise TypeError( + "nameTable is required if palette or palette entries have labels" + ) + + cpal = C_P_A_L_.table_C_P_A_L_() + cpal.numPaletteEntries = len(palettes[0]) + + cpal.palettes = [] + for i, palette in enumerate(palettes): + colors = [] + for j, color in enumerate(palette): + if not isinstance(color, tuple) or len(color) != 4: + raise ColorLibError( + f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}" + ) + if any(v > 1 or v < 0 for v in color): + raise ColorLibError( + f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}" + ) + # input colors are RGBA, CPAL encodes them as BGRA + red, green, blue, alpha = color + colors.append( + C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha))) + ) + cpal.palettes.append(colors) + + if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)): + cpal.version = 1 + + if paletteTypes is not None: + if len(paletteTypes) != len(palettes): + raise ColorLibError( + f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}" + ) + cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes] + else: + cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len( + palettes + ) + + if paletteLabels is not None: + if len(paletteLabels) != len(palettes): + raise ColorLibError( + f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}" + ) + cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable) + else: + cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes) + + if paletteEntryLabels is not None: + if len(paletteEntryLabels) != cpal.numPaletteEntries: + raise ColorLibError( + f"Expected {cpal.numPaletteEntries} paletteEntryLabels, " + f"got {len(paletteEntryLabels)}" + ) + cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable) + else: + cpal.paletteEntryLabels = [ + C_P_A_L_.table_C_P_A_L_.NO_NAME_ID + ] * cpal.numPaletteEntries + else: + cpal.version = 0 + + return cpal + + +# COLR v1 tables +# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec + + +def _is_colrv0_layer(layer: Any) -> bool: + # Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which + # the first element is a str (the layerGlyph) and the second element is an int + # (CPAL paletteIndex). 
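+    # E.g. ("A.color0", 1) is a v0 layer; an ot.Paint or a {"Format": ...} dict is not.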
+ # https://github.com/googlefonts/ufo2ft/issues/426 + try: + layerGlyph, paletteIndex = layer + except (TypeError, ValueError): + return False + else: + return isinstance(layerGlyph, str) and isinstance(paletteIndex, int) + + +def _split_color_glyphs_by_version( + colorGlyphs: _ColorGlyphsDict, +) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]: + colorGlyphsV0 = {} + colorGlyphsV1 = {} + for baseGlyph, layers in colorGlyphs.items(): + if all(_is_colrv0_layer(l) for l in layers): + colorGlyphsV0[baseGlyph] = layers + else: + colorGlyphsV1[baseGlyph] = layers + + # sanity check + assert set(colorGlyphs) == (set(colorGlyphsV0) | set(colorGlyphsV1)) + + return colorGlyphsV0, colorGlyphsV1 + + +def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]: + # TODO feels like something itertools might have already + for lbound in range(num_layers): + # Reuse of very large #s of layers is relatively unlikely + # +2: we want sequences of at least 2 + # otData handles single-record duplication + for ubound in range( + lbound + 2, min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN) + ): + yield (lbound, ubound) + + +class LayerReuseCache: + reusePool: Mapping[Tuple[Any, ...], int] + tuples: Mapping[int, Tuple[Any, ...]] + keepAlive: List[ot.Paint] # we need id to remain valid + + def __init__(self): + self.reusePool = {} + self.tuples = {} + self.keepAlive = [] + + def _paint_tuple(self, paint: ot.Paint): + # start simple, who even cares about cyclic graphs or interesting field types + def _tuple_safe(value): + if isinstance(value, enum.Enum): + return value + elif hasattr(value, "__dict__"): + return tuple( + (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items()) + ) + elif isinstance(value, collections.abc.MutableSequence): + return tuple(_tuple_safe(e) for e in value) + return value + + # Cache the tuples for individual Paint instead of the whole sequence + # because the seq could be a transient slice + result = self.tuples.get(id(paint), None) + if result is None: + result = _tuple_safe(paint) + self.tuples[id(paint)] = result + self.keepAlive.append(paint) + return result + + def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]: + return tuple(self._paint_tuple(p) for p in paints) + + def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]: + found_reuse = True + while found_reuse: + found_reuse = False + + ranges = sorted( + _reuse_ranges(len(layers)), + key=lambda t: (t[1] - t[0], t[1], t[0]), + reverse=True, + ) + for lbound, ubound in ranges: + reuse_lbound = self.reusePool.get( + self._as_tuple(layers[lbound:ubound]), -1 + ) + if reuse_lbound == -1: + continue + new_slice = ot.Paint() + new_slice.Format = int(ot.PaintFormat.PaintColrLayers) + new_slice.NumLayers = ubound - lbound + new_slice.FirstLayerIndex = reuse_lbound + layers = layers[:lbound] + [new_slice] + layers[ubound:] + found_reuse = True + break + return layers + + def add(self, layers: List[ot.Paint], first_layer_index: int): + for lbound, ubound in _reuse_ranges(len(layers)): + self.reusePool[self._as_tuple(layers[lbound:ubound])] = ( + lbound + first_layer_index + ) + + +class LayerListBuilder: + layers: List[ot.Paint] + cache: LayerReuseCache + allowLayerReuse: bool + + def __init__(self, *, allowLayerReuse=True): + self.layers = [] + if allowLayerReuse: + self.cache = LayerReuseCache() + else: + self.cache = None + + # We need to intercept construction of PaintColrLayers + callbacks = _buildPaintCallbacks() + callbacks[ + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + 
ot.PaintFormat.PaintColrLayers, + ) + ] = self._beforeBuildPaintColrLayers + self.tableBuilder = TableBuilder(callbacks) + + # COLR layers is unusual in that it modifies shared state + # so we need a callback into an object + def _beforeBuildPaintColrLayers(self, dest, source): + # Sketchy gymnastics: a sequence input will have dropped it's layers + # into NumLayers; get it back + if isinstance(source.get("NumLayers", None), collections.abc.Sequence): + layers = source["NumLayers"] + else: + layers = source["Layers"] + + # Convert maps seqs or whatever into typed objects + layers = [self.buildPaint(l) for l in layers] + + # No reason to have a colr layers with just one entry + if len(layers) == 1: + return layers[0], {} + + if self.cache is not None: + # Look for reuse, with preference to longer sequences + # This may make the layer list smaller + layers = self.cache.try_reuse(layers) + + # The layer list is now final; if it's too big we need to tree it + is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT + layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT) + + # We now have a tree of sequences with Paint leaves. + # Convert the sequences into PaintColrLayers. + def listToColrLayers(layer): + if isinstance(layer, collections.abc.Sequence): + return self.buildPaint( + { + "Format": ot.PaintFormat.PaintColrLayers, + "Layers": [listToColrLayers(l) for l in layer], + } + ) + return layer + + layers = [listToColrLayers(l) for l in layers] + + # No reason to have a colr layers with just one entry + if len(layers) == 1: + return layers[0], {} + + paint = ot.Paint() + paint.Format = int(ot.PaintFormat.PaintColrLayers) + paint.NumLayers = len(layers) + paint.FirstLayerIndex = len(self.layers) + self.layers.extend(layers) + + # Register our parts for reuse provided we aren't a tree + # If we are a tree the leaves registered for reuse and that will suffice + if self.cache is not None and not is_tree: + self.cache.add(layers, paint.FirstLayerIndex) + + # we've fully built dest; empty source prevents generalized build from kicking in + return paint, {} + + def buildPaint(self, paint: _PaintInput) -> ot.Paint: + return self.tableBuilder.build(ot.Paint, paint) + + def build(self) -> Optional[ot.LayerList]: + if not self.layers: + return None + layers = ot.LayerList() + layers.LayerCount = len(self.layers) + layers.Paint = self.layers + return layers + + +def buildBaseGlyphPaintRecord( + baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput +) -> ot.BaseGlyphList: + self = ot.BaseGlyphPaintRecord() + self.BaseGlyph = baseGlyph + self.Paint = layerBuilder.buildPaint(paint) + return self + + +def _format_glyph_errors(errors: Mapping[str, Exception]) -> str: + lines = [] + for baseGlyph, error in sorted(errors.items()): + lines.append(f" {baseGlyph} => {type(error).__name__}: {error}") + return "\n".join(lines) + + +def buildColrV1( + colorGlyphs: _ColorGlyphsDict, + glyphMap: Optional[Mapping[str, int]] = None, + *, + allowLayerReuse: bool = True, +) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]: + if glyphMap is not None: + colorGlyphItems = sorted( + colorGlyphs.items(), key=lambda item: glyphMap[item[0]] + ) + else: + colorGlyphItems = colorGlyphs.items() + + errors = {} + baseGlyphs = [] + layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse) + for baseGlyph, paint in colorGlyphItems: + try: + baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint)) + + except (ColorLibError, OverflowError, ValueError, TypeError) as e: + errors[baseGlyph] = e 
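+            # Collect the failure and keep going, so that all bad glyphs are
+            # reported together below instead of bailing out on the first one.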
+ + if errors: + failed_glyphs = _format_glyph_errors(errors) + exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}") + exc.errors = errors + raise exc from next(iter(errors.values())) + + layers = layerBuilder.build() + glyphs = ot.BaseGlyphList() + glyphs.BaseGlyphCount = len(baseGlyphs) + glyphs.BaseGlyphPaintRecord = baseGlyphs + return (layers, glyphs) diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/errors.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..18cbebbaf91ff7d5a515321a006be3eb1d83faaf --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/colorLib/errors.py @@ -0,0 +1,2 @@ +class ColorLibError(Exception): + pass diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/geometry.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..1ce161bfa117df1632b507d161f0dd4abb633bcc --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/colorLib/geometry.py @@ -0,0 +1,143 @@ +"""Helpers for manipulating 2D points and vectors in COLR table.""" + +from math import copysign, cos, hypot, isclose, pi +from fontTools.misc.roundTools import otRound + + +def _vector_between(origin, target): + return (target[0] - origin[0], target[1] - origin[1]) + + +def _round_point(pt): + return (otRound(pt[0]), otRound(pt[1])) + + +def _unit_vector(vec): + length = hypot(*vec) + if length == 0: + return None + return (vec[0] / length, vec[1] / length) + + +_CIRCLE_INSIDE_TOLERANCE = 1e-4 + + +# The unit vector's X and Y components are respectively +# U = (cos(α), sin(α)) +# where α is the angle between the unit vector and the positive x axis. +_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984 + + +def _rounding_offset(direction): + # Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector. + # We divide the unit circle in 8 equal slices oriented towards the cardinal + # (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we + # map one of the possible cases: -1, 0, +1 for either X and Y coordinate. + # E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or + # (-1.0, 0.0) if it's pointing West, etc. 
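+    # E.g. direction (2, -3) normalizes to roughly (0.55, -0.83): both components
+    # clear the threshold, so the rounding offset is (+1.0, -1.0), i.e. SE.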
+    uv = _unit_vector(direction)
+    if not uv:
+        return (0, 0)
+
+    result = []
+    for uv_component in uv:
+        if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD:
+            # unit vector component near 0: direction almost orthogonal to the
+            # direction of the current axis, thus keep coordinate unchanged
+            result.append(0)
+        else:
+            # nudge coord by +/- 1.0 in direction of unit vector
+            result.append(copysign(1.0, uv_component))
+    return tuple(result)
+
+
+class Circle:
+    def __init__(self, centre, radius):
+        self.centre = centre
+        self.radius = radius
+
+    def __repr__(self):
+        return f"Circle(centre={self.centre}, radius={self.radius})"
+
+    def round(self):
+        return Circle(_round_point(self.centre), otRound(self.radius))
+
+    def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
+        dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
+        return (
+            isclose(outer_circle.radius, dist, rel_tol=tolerance)
+            or outer_circle.radius > dist
+        )
+
+    def concentric(self, other):
+        return self.centre == other.centre
+
+    def move(self, dx, dy):
+        self.centre = (self.centre[0] + dx, self.centre[1] + dy)
+
+
+def round_start_circle_stable_containment(c0, r0, c1, r1):
+    """Round start circle so that it stays inside/outside end circle after rounding.
+
+    The rounding of circle coordinates to integers may cause an abrupt change
+    if the start circle c0 is so close to the end circle c1's perimeter that
+    it ends up falling outside (or inside) as a result of the rounding.
+    To keep the gradient unchanged, we nudge it in the right direction.
+
+    See:
+    https://github.com/googlefonts/colr-gradients-spec/issues/204
+    https://github.com/googlefonts/picosvg/issues/158
+    """
+    start, end = Circle(c0, r0), Circle(c1, r1)
+
+    inside_before_round = start.inside(end)
+
+    round_start = start.round()
+    round_end = end.round()
+    inside_after_round = round_start.inside(round_end)
+
+    if inside_before_round == inside_after_round:
+        return round_start
+    elif inside_after_round:
+        # start was outside before rounding: we need to push start away from end
+        direction = _vector_between(round_end.centre, round_start.centre)
+        radius_delta = +1.0
+    else:
+        # start was inside before rounding: we need to push start towards end
+        direction = _vector_between(round_start.centre, round_end.centre)
+        radius_delta = -1.0
+    dx, dy = _rounding_offset(direction)
+
+    # At most 2 iterations ought to be enough to converge. Before the loop, we
+    # know the start circle didn't keep containment after normal rounding; thus
+    # we continue adjusting by -/+ 1.0 until containment is restored.
+    # Normal rounding can at most move each coordinate by -/+0.5; in the worst case
+    # both the start and end circle's centres and radii will be rounded in opposite
+    # directions, e.g. when they move along a 45 degree diagonal:
+    #   c0 = (1.5, 1.5) ===> (2.0, 2.0)
+    #   r0 = 0.5 ===> 1.0
+    #   c1 = (0.499, 0.499) ===> (0.0, 0.0)
+    #   r1 = 2.499 ===> 2.0
+    # In this example, the relative distance between the circles, calculated
+    # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
+    # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
+    # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
+    # moves cover twice that distance, which is enough to restore containment.
+ max_attempts = 2 + for _ in range(max_attempts): + if round_start.concentric(round_end): + # can't move c0 towards c1 (they are the same), so we change the radius + round_start.radius += radius_delta + assert round_start.radius >= 0 + else: + round_start.move(dx, dy) + if inside_before_round == round_start.inside(round_end): + break + else: # likely a bug + raise AssertionError( + f"Rounding circle {start} " + f"{'inside' if inside_before_round else 'outside'} " + f"{end} failed after {max_attempts} attempts!" + ) + + return round_start diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/table_builder.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/table_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f1e182c432eae48545b2264d57d58c754ff6cd30 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/colorLib/table_builder.py @@ -0,0 +1,223 @@ +""" +colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such. + +""" + +import collections +import enum +from fontTools.ttLib.tables.otBase import ( + BaseTable, + FormatSwitchingBaseTable, + UInt8FormatSwitchingBaseTable, +) +from fontTools.ttLib.tables.otConverters import ( + ComputedInt, + SimpleValue, + Struct, + Short, + UInt8, + UShort, + IntValue, + FloatValue, + OptionalValue, +) +from fontTools.misc.roundTools import otRound + + +class BuildCallback(enum.Enum): + """Keyed on (BEFORE_BUILD, class[, Format if available]). + Receives (dest, source). + Should return (dest, source), which can be new objects. + """ + + BEFORE_BUILD = enum.auto() + + """Keyed on (AFTER_BUILD, class[, Format if available]). + Receives (dest). + Should return dest, which can be a new object. + """ + AFTER_BUILD = enum.auto() + + """Keyed on (CREATE_DEFAULT, class[, Format if available]). + Receives no arguments. + Should return a new instance of class. + """ + CREATE_DEFAULT = enum.auto() + + +def _assignable(convertersByName): + return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)} + + +def _isNonStrSequence(value): + return isinstance(value, collections.abc.Sequence) and not isinstance(value, str) + + +def _split_format(cls, source): + if _isNonStrSequence(source): + assert len(source) > 0, f"{cls} needs at least format from {source}" + fmt, remainder = source[0], source[1:] + elif isinstance(source, collections.abc.Mapping): + assert "Format" in source, f"{cls} needs at least Format from {source}" + remainder = source.copy() + fmt = remainder.pop("Format") + else: + raise ValueError(f"Not sure how to populate {cls} from {source}") + + assert isinstance( + fmt, collections.abc.Hashable + ), f"{cls} Format is not hashable: {fmt!r}" + assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}" + + return fmt, remainder + + +class TableBuilder: + """ + Helps to populate things derived from BaseTable from maps, tuples, etc. + + A table of lifecycle callbacks may be provided to add logic beyond what is possible + based on otData info for the target class. See BuildCallbacks. 
+ """ + + def __init__(self, callbackTable=None): + if callbackTable is None: + callbackTable = {} + self._callbackTable = callbackTable + + def _convert(self, dest, field, converter, value): + enumClass = getattr(converter, "enumClass", None) + + if enumClass: + if isinstance(value, enumClass): + pass + elif isinstance(value, str): + try: + value = getattr(enumClass, value.upper()) + except AttributeError: + raise ValueError(f"{value} is not a valid {enumClass}") + else: + value = enumClass(value) + + elif isinstance(converter, IntValue): + value = otRound(value) + elif isinstance(converter, FloatValue): + value = float(value) + + elif isinstance(converter, Struct): + if converter.repeat: + if _isNonStrSequence(value): + value = [self.build(converter.tableClass, v) for v in value] + else: + value = [self.build(converter.tableClass, value)] + setattr(dest, converter.repeat, len(value)) + else: + value = self.build(converter.tableClass, value) + elif callable(converter): + value = converter(value) + + setattr(dest, field, value) + + def build(self, cls, source): + assert issubclass(cls, BaseTable) + + if isinstance(source, cls): + return source + + callbackKey = (cls,) + fmt = None + if issubclass(cls, FormatSwitchingBaseTable): + fmt, source = _split_format(cls, source) + callbackKey = (cls, fmt) + + dest = self._callbackTable.get( + (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls() + )() + assert isinstance(dest, cls) + + convByName = _assignable(cls.convertersByName) + skippedFields = set() + + # For format switchers we need to resolve converters based on format + if issubclass(cls, FormatSwitchingBaseTable): + dest.Format = fmt + convByName = _assignable(convByName[dest.Format]) + skippedFields.add("Format") + + # Convert sequence => mapping so before thunk only has to handle one format + if _isNonStrSequence(source): + # Sequence (typically list or tuple) assumed to match fields in declaration order + assert len(source) <= len( + convByName + ), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values" + source = dict(zip(convByName.keys(), source)) + + dest, source = self._callbackTable.get( + (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s) + )(dest, source) + + if isinstance(source, collections.abc.Mapping): + for field, value in source.items(): + if field in skippedFields: + continue + converter = convByName.get(field, None) + if not converter: + raise ValueError( + f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}" + ) + self._convert(dest, field, converter, value) + else: + # let's try as a 1-tuple + dest = self.build(cls, (source,)) + + for field, conv in convByName.items(): + if not hasattr(dest, field) and isinstance(conv, OptionalValue): + setattr(dest, field, conv.DEFAULT) + + dest = self._callbackTable.get( + (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d + )(dest) + + return dest + + +class TableUnbuilder: + def __init__(self, callbackTable=None): + if callbackTable is None: + callbackTable = {} + self._callbackTable = callbackTable + + def unbuild(self, table): + assert isinstance(table, BaseTable) + + source = {} + + callbackKey = (type(table),) + if isinstance(table, FormatSwitchingBaseTable): + source["Format"] = int(table.Format) + callbackKey += (table.Format,) + + for converter in table.getConverters(): + if isinstance(converter, ComputedInt): + continue + value = getattr(table, converter.name) + + enumClass = getattr(converter, "enumClass", None) + if enumClass: + 
                source[converter.name] = value.name.lower()
+            elif isinstance(converter, Struct):
+                if converter.repeat:
+                    source[converter.name] = [self.unbuild(v) for v in value]
+                else:
+                    source[converter.name] = self.unbuild(value)
+            elif isinstance(converter, SimpleValue):
+                # "simple" values (e.g. int, float, str) need no further un-building
+                source[converter.name] = value
+            else:
+                raise NotImplementedError(
+                    f"Don't know how to unbuild {value!r} with {converter!r}"
+                )
+
+        source = self._callbackTable.get(callbackKey, lambda s: s)(source)
+
+        return source
diff --git a/py311/lib/python3.11/site-packages/fontTools/colorLib/unbuilder.py b/py311/lib/python3.11/site-packages/fontTools/colorLib/unbuilder.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac243550b8908aef120b395e740b9974559d65b5
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/fontTools/colorLib/unbuilder.py
@@ -0,0 +1,81 @@
+from fontTools.ttLib.tables import otTables as ot
+from .table_builder import TableUnbuilder
+
+
+def unbuildColrV1(layerList, baseGlyphList):
+    layers = []
+    if layerList:
+        layers = layerList.Paint
+    unbuilder = LayerListUnbuilder(layers)
+    return {
+        rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
+        for rec in baseGlyphList.BaseGlyphPaintRecord
+    }
+
+
+def _flatten_layers(lst):
+    for paint in lst:
+        if paint["Format"] == ot.PaintFormat.PaintColrLayers:
+            yield from _flatten_layers(paint["Layers"])
+        else:
+            yield paint
+
+
+class LayerListUnbuilder:
+    def __init__(self, layers):
+        self.layers = layers
+
+        callbacks = {
+            (
+                ot.Paint,
+                ot.PaintFormat.PaintColrLayers,
+            ): self._unbuildPaintColrLayers,
+        }
+        self.tableUnbuilder = TableUnbuilder(callbacks)
+
+    def unbuildPaint(self, paint):
+        assert isinstance(paint, ot.Paint)
+        return self.tableUnbuilder.unbuild(paint)
+
+    def _unbuildPaintColrLayers(self, source):
+        assert source["Format"] == ot.PaintFormat.PaintColrLayers
+
+        layers = list(
+            _flatten_layers(
+                [
+                    self.unbuildPaint(childPaint)
+                    for childPaint in self.layers[
+                        source["FirstLayerIndex"] : source["FirstLayerIndex"]
+                        + source["NumLayers"]
+                    ]
+                ]
+            )
+        )
+
+        if len(layers) == 1:
+            return layers[0]
+
+        return {"Format": source["Format"], "Layers": layers}
+
+
+if __name__ == "__main__":
+    from pprint import pprint
+    import sys
+    from fontTools.ttLib import TTFont
+
+    try:
+        fontfile = sys.argv[1]
+    except IndexError:
+        sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
+
+    font = TTFont(fontfile)
+    colr = font["COLR"]
+    if colr.version < 1:
+        sys.exit(f"error: No COLR table version=1 found in {fontfile}")
+
+    colorGlyphs = unbuildColrV1(
+        colr.table.LayerList,
+        colr.table.BaseGlyphList,
+    )
+
+    pprint(colorGlyphs)
diff --git a/py311/lib/python3.11/site-packages/fontTools/config/__init__.py b/py311/lib/python3.11/site-packages/fontTools/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff0328a3d4593246336bc2a9dff938040fe62e36
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/fontTools/config/__init__.py
@@ -0,0 +1,90 @@
+"""
+Define all configuration options that can affect the working of fontTools
+modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level,
+etc. If this file gets too big, split it into smaller files per-module.
+
+An instance of the Config class can be attached to a TTFont object, so that
+the various modules can access their configuration options from it.
+""" + +from textwrap import dedent + +from fontTools.misc.configTools import * + + +class Config(AbstractConfig): + options = Options() + + +OPTIONS = Config.options + + +Config.register_option( + name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL", + help=dedent( + """\ + GPOS Lookup type 2 (PairPos) compression level: + 0 = do not attempt to compact PairPos lookups; + 1 to 8 = create at most 1 to 8 new subtables for each existing + subtable, provided that it would yield a 50%% file size saving; + 9 = create as many new subtables as needed to yield a file size saving. + Default: 0. + + This compaction aims to save file size, by splitting large class + kerning subtables (Format 2) that contain many zero values into + smaller and denser subtables. It's a trade-off between the overhead + of several subtables versus the sparseness of one big subtable. + + See the pull request: https://github.com/fonttools/fonttools/pull/2326 + """ + ), + default=0, + parse=int, + validate=lambda v: v in range(10), +) + +Config.register_option( + name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER", + help=dedent( + """\ + FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables + if the uharfbuzz python bindings are importable, otherwise falls back to its + slower, less efficient serializer. Set to False to always use the latter. + Set to True to explicitly request the HarfBuzz Repacker (will raise an + error if uharfbuzz cannot be imported). + """ + ), + default=None, + parse=Option.parse_optional_bool, + validate=Option.validate_optional_bool, +) + +Config.register_option( + name="fontTools.otlLib.builder:WRITE_GPOS7", + help=dedent( + """\ + macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining + ContextPos lookups), so FontTools.otlLib.builder disables a file size + optimisation that would use LookupType 7 instead of 8 when there is no + chaining (no prefix or suffix). Set to True to enable the optimization. + """ + ), + default=False, + parse=Option.parse_optional_bool, + validate=Option.validate_optional_bool, +) + +Config.register_option( + name="fontTools.ttLib:OPTIMIZE_FONT_SPEED", + help=dedent( + """\ + Enable optimizations that prioritize speed over file size. This + mainly affects how glyf table and gvar / VARC tables are compiled. + The produced fonts will be larger, but rendering performance will + be improved with HarfBuzz and other text layout engines. + """ + ), + default=False, + parse=Option.parse_optional_bool, + validate=Option.validate_optional_bool, +) diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/__init__.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae6356e44e1fed074b6283bcb4365bf2b770529 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .cu2qu import * diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/__main__.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5205ffeef9b4d244bdbaa4c15fb0dc11a12b550e --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/__main__.py @@ -0,0 +1,6 @@ +import sys +from .cli import _main as main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/benchmark.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..007f75d887e312b68a1859546629c6410070770d --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/benchmark.py @@ -0,0 +1,54 @@ +"""Benchmark the cu2qu algorithm performance.""" + +from .cu2qu import * +import random +import timeit + +MAX_ERR = 0.05 + + +def generate_curve(): + return [ + tuple(float(random.randint(0, 2048)) for coord in range(2)) + for point in range(4) + ] + + +def setup_curve_to_quadratic(): + return generate_curve(), MAX_ERR + + +def setup_curves_to_quadratic(): + num_curves = 3 + return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves) + + +def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000): + setup_func = "setup_" + function + if setup_suffix: + print("%s with %s:" % (function, setup_suffix), end="") + setup_func += "_" + setup_suffix + else: + print("%s:" % function, end="") + + def wrapper(function, setup_func): + function = globals()[function] + setup_func = globals()[setup_func] + + def wrapped(): + return function(*setup_func()) + + return wrapped + + results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number) + print("\t%5.1fus" % (min(results) * 1000000.0 / number)) + + +def main(): + run_benchmark("cu2qu", "curve_to_quadratic") + run_benchmark("cu2qu", "curves_to_quadratic") + + +if __name__ == "__main__": + random.seed(1) + main() diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/cli.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc6450200820766a764c9c9b18b4b3c3c04d0e4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cli.py @@ -0,0 +1,198 @@ +import os +import argparse +import logging +import shutil +import multiprocessing as mp +from contextlib import closing +from functools import partial + +import fontTools +from .ufo import font_to_quadratic, fonts_to_quadratic + +ufo_module = None +try: + import ufoLib2 as ufo_module +except ImportError: + try: + import defcon as ufo_module + except ImportError as e: + pass + + +logger = logging.getLogger("fontTools.cu2qu") + + +def _cpu_count(): + try: + return mp.cpu_count() + except NotImplementedError: # pragma: no cover + return 1 + + +def open_ufo(path): + if hasattr(ufo_module.Font, "open"): # ufoLib2 + return ufo_module.Font.open(path) + return ufo_module.Font(path) # defcon + + +def _font_to_quadratic(input_path, output_path=None, **kwargs): + ufo = open_ufo(input_path) + logger.info("Converting curves for %s", input_path) + if font_to_quadratic(ufo, **kwargs): + logger.info("Saving %s", output_path) + if output_path: + ufo.save(output_path) + else: + ufo.save() # save in-place + elif output_path: + _copytree(input_path, output_path) + + +def _samepath(path1, path2): + # TODO on python3+, there's os.path.samefile + path1 = 
os.path.normcase(os.path.abspath(os.path.realpath(path1)))
+    path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
+    return path1 == path2
+
+
+def _copytree(input_path, output_path):
+    if _samepath(input_path, output_path):
+        logger.debug("input and output paths are the same file; skipped copy")
+        return
+    if os.path.exists(output_path):
+        shutil.rmtree(output_path)
+    shutil.copytree(input_path, output_path)
+
+
+def _main(args=None):
+    """Convert a UFO font from cubic to quadratic curves"""
+    parser = argparse.ArgumentParser(prog="cu2qu")
+    parser.add_argument("--version", action="version", version=fontTools.__version__)
+    parser.add_argument(
+        "infiles",
+        nargs="+",
+        metavar="INPUT",
+        help="one or more input UFO source file(s).",
+    )
+    parser.add_argument("-v", "--verbose", action="count", default=0)
+    parser.add_argument(
+        "-e",
+        "--conversion-error",
+        type=float,
+        metavar="ERROR",
+        default=None,
+        help="maximum approximation error measured in EM (default: 0.001)",
+    )
+    parser.add_argument(
+        "-m",
+        "--mixed",
+        default=False,
+        action="store_true",
+        help="whether to use mixed quadratic and cubic curves",
+    )
+    parser.add_argument(
+        "--keep-direction",
+        dest="reverse_direction",
+        action="store_false",
+        help="do not reverse the contour direction",
+    )
+
+    mode_parser = parser.add_mutually_exclusive_group()
+    mode_parser.add_argument(
+        "-i",
+        "--interpolatable",
+        action="store_true",
+        help="whether curve conversion should keep interpolation compatibility",
+    )
+    mode_parser.add_argument(
+        "-j",
+        "--jobs",
+        type=int,
+        nargs="?",
+        default=1,
+        const=_cpu_count(),
+        metavar="N",
+        help="Convert using N multiple processes (default: %(default)s)",
+    )
+
+    output_parser = parser.add_mutually_exclusive_group()
+    output_parser.add_argument(
+        "-o",
+        "--output-file",
+        default=None,
+        metavar="OUTPUT",
+        help=(
+            "output filename for the converted UFO. By default fonts are "
+            "modified in place. This only works with a single input."
+ ), + ) + output_parser.add_argument( + "-d", + "--output-dir", + default=None, + metavar="DIRECTORY", + help="output directory where to save converted UFOs", + ) + + options = parser.parse_args(args) + + if ufo_module is None: + parser.error("Either ufoLib2 or defcon are required to run this script.") + + if not options.verbose: + level = "WARNING" + elif options.verbose == 1: + level = "INFO" + else: + level = "DEBUG" + logging.basicConfig(level=level) + + if len(options.infiles) > 1 and options.output_file: + parser.error("-o/--output-file can't be used with multile inputs") + + if options.output_dir: + output_dir = options.output_dir + if not os.path.exists(output_dir): + os.mkdir(output_dir) + elif not os.path.isdir(output_dir): + parser.error("'%s' is not a directory" % output_dir) + output_paths = [ + os.path.join(output_dir, os.path.basename(p)) for p in options.infiles + ] + elif options.output_file: + output_paths = [options.output_file] + else: + # save in-place + output_paths = [None] * len(options.infiles) + + kwargs = dict( + dump_stats=options.verbose > 0, + max_err_em=options.conversion_error, + reverse_direction=options.reverse_direction, + all_quadratic=False if options.mixed else True, + ) + + if options.interpolatable: + logger.info("Converting curves compatibly") + ufos = [open_ufo(infile) for infile in options.infiles] + if fonts_to_quadratic(ufos, **kwargs): + for ufo, output_path in zip(ufos, output_paths): + logger.info("Saving %s", output_path) + if output_path: + ufo.save(output_path) + else: + ufo.save() + else: + for input_path, output_path in zip(options.infiles, output_paths): + if output_path: + _copytree(input_path, output_path) + else: + jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1 + if jobs > 1: + func = partial(_font_to_quadratic, **kwargs) + logger.info("Running %d parallel processes", jobs) + with closing(mp.Pool(jobs)) as pool: + pool.starmap(func, zip(options.infiles, output_paths)) + else: + for input_path, output_path in zip(options.infiles, output_paths): + _font_to_quadratic(input_path, output_path, **kwargs) diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.c b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.c new file mode 100644 index 0000000000000000000000000000000000000000..170811af57d34445079307e74bf06fee5c187389 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.c @@ -0,0 +1,15817 @@ +/* Generated by Cython 3.2.2 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "define_macros": [ + [ + "CYTHON_TRACE_NOGIL", + "1" + ] + ], + "name": "fontTools.cu2qu.cu2qu", + "sources": [ + "Lib/fontTools/cu2qu/cu2qu.py" + ] + }, + "module_name": "fontTools.cu2qu.cu2qu" +} +END: Cython Metadata */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +/* InitLimitedAPI */ +#if defined(Py_LIMITED_API) + #if !defined(CYTHON_LIMITED_API) + #define CYTHON_LIMITED_API 1 + #endif +#elif defined(CYTHON_LIMITED_API) + #ifdef _MSC_VER + #pragma message ("Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead.") + #else + #warning Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead. 
+ #endif
+#endif
+
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x03080000
+ #error Cython requires Python 3.8+.
+#else
+#define __PYX_ABI_VERSION "3_2_2"
+#define CYTHON_HEX_VERSION 0x030202F0
+#define CYTHON_FUTURE_DIVISION 1
+/* CModulePreamble */
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX
+#if defined(GRAALVM_PYTHON)
+ /* For very preliminary testing purposes. Most variables are set the same as PyPy.
+ The existence of this section does not imply that anything works or is even tested */
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #define CYTHON_COMPILING_IN_LIMITED_API 0
+ #define CYTHON_COMPILING_IN_GRAAL 1
+ #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_TYPE_SPECS
+ #define CYTHON_USE_TYPE_SPECS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+ #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_GIL
+ #define CYTHON_FAST_GIL 0
+ #undef CYTHON_METH_FASTCALL
+ #define CYTHON_METH_FASTCALL 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #ifndef CYTHON_PEP487_INIT_SUBCLASS
+ #define CYTHON_PEP487_INIT_SUBCLASS 1
+ #endif
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 1
+ #undef CYTHON_USE_MODULE_STATE
+ #define CYTHON_USE_MODULE_STATE 0
+ #undef CYTHON_USE_SYS_MONITORING
+ #define CYTHON_USE_SYS_MONITORING 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_AM_SEND
+ #define CYTHON_USE_AM_SEND 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 1
+ #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
+ #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
+ #endif
+ #undef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 0
+ #undef CYTHON_IMMORTAL_CONSTANTS
+ #define CYTHON_IMMORTAL_CONSTANTS 0
+#elif defined(PYPY_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #define CYTHON_COMPILING_IN_LIMITED_API 0
+ #define CYTHON_COMPILING_IN_GRAAL 0
+ #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #ifndef CYTHON_USE_TYPE_SPECS
+ #define CYTHON_USE_TYPE_SPECS 0
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+ #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #ifndef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 1
+ #endif
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_GIL
+ #define CYTHON_FAST_GIL 0
+ #undef CYTHON_METH_FASTCALL
+ #define CYTHON_METH_FASTCALL 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #ifndef CYTHON_PEP487_INIT_SUBCLASS
+ #define CYTHON_PEP487_INIT_SUBCLASS 1
+ #endif
+ #if PY_VERSION_HEX < 0x03090000
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 1
+ #endif
+ #undef CYTHON_USE_MODULE_STATE
+ #define CYTHON_USE_MODULE_STATE 0
+ #undef CYTHON_USE_SYS_MONITORING
+ #define CYTHON_USE_SYS_MONITORING 0
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PYPY_VERSION_NUM >= 0x07030C00)
+ #endif
+ #undef CYTHON_USE_AM_SEND
+ #define CYTHON_USE_AM_SEND 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+ #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
+ #define CYTHON_UPDATE_DESCRIPTOR_DOC (PYPY_VERSION_NUM >= 0x07031100)
+ #endif
+ #undef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 0
+ #undef CYTHON_IMMORTAL_CONSTANTS
+ #define CYTHON_IMMORTAL_CONSTANTS 0
+#elif defined(CYTHON_LIMITED_API)
+ #ifdef Py_LIMITED_API
+ #undef __PYX_LIMITED_VERSION_HEX
+ #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API
+ #endif
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #define CYTHON_COMPILING_IN_LIMITED_API 1
+ #define CYTHON_COMPILING_IN_GRAAL 0
+ #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_TYPE_SPECS
+ #define CYTHON_USE_TYPE_SPECS 1
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #endif
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+ #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
+ #endif
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_GIL
+ #define CYTHON_FAST_GIL 0
+ #undef CYTHON_METH_FASTCALL
+ #define CYTHON_METH_FASTCALL (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #ifndef CYTHON_PEP487_INIT_SUBCLASS
+ #define CYTHON_PEP487_INIT_SUBCLASS 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 1
+ #endif
+ #ifndef CYTHON_USE_MODULE_STATE
+ #define CYTHON_USE_MODULE_STATE 0
+ #endif
+ #undef CYTHON_USE_SYS_MONITORING
+ #define CYTHON_USE_SYS_MONITORING 0
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #endif
+ #ifndef CYTHON_USE_AM_SEND
+ #define CYTHON_USE_AM_SEND (__PYX_LIMITED_VERSION_HEX >= 0x030A0000)
+ #endif
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+ #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
+ #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
+ #endif
+ #ifndef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 1
+ #endif
+ #undef CYTHON_IMMORTAL_CONSTANTS
+ #define CYTHON_IMMORTAL_CONSTANTS 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #define CYTHON_COMPILING_IN_LIMITED_API 0
+ #define CYTHON_COMPILING_IN_GRAAL 0
+ #ifdef Py_GIL_DISABLED
+ #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 1
+ #else
+ #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
+ #endif
+ #if PY_VERSION_HEX < 0x030A0000
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #elif !defined(CYTHON_USE_TYPE_SLOTS)
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #ifndef CYTHON_USE_TYPE_SPECS
+ #define CYTHON_USE_TYPE_SPECS 0
+ #endif
+ #ifndef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #ifndef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLIST_INTERNALS)
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING || PY_VERSION_HEX >= 0x030B00A2
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+ #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+ #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
+ #elif !defined(CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS)
+ #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+ #undef CYTHON_FAST_GIL
+ #define CYTHON_FAST_GIL 0
+ #elif !defined(CYTHON_FAST_GIL)
+ #define CYTHON_FAST_GIL (PY_VERSION_HEX < 0x030C00A6)
+ #endif
+ #ifndef CYTHON_METH_FASTCALL
+ #define CYTHON_METH_FASTCALL 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP487_INIT_SUBCLASS
+ #define CYTHON_PEP487_INIT_SUBCLASS 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 1
+ #endif
+ #ifndef CYTHON_USE_MODULE_STATE
+ #define CYTHON_USE_MODULE_STATE 0
+ #endif
+ #ifndef CYTHON_USE_SYS_MONITORING
+ #define CYTHON_USE_SYS_MONITORING (PY_VERSION_HEX >= 0x030d00B1)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 1
+ #endif
+ #ifndef CYTHON_USE_AM_SEND
+ #define CYTHON_USE_AM_SEND 1
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #elif !defined(CYTHON_USE_DICT_VERSIONS)
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5 && !CYTHON_USE_MODULE_STATE)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 1
+ #endif
+ #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
+ #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
+ #endif
+ #ifndef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS (!CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
+ #endif
+ #if defined(CYTHON_IMMORTAL_CONSTANTS) && PY_VERSION_HEX < 0x030C0000
+ #undef CYTHON_IMMORTAL_CONSTANTS
+ #define CYTHON_IMMORTAL_CONSTANTS 0 // definitely won't work
+ #elif !defined(CYTHON_IMMORTAL_CONSTANTS)
+ #define CYTHON_IMMORTAL_CONSTANTS (PY_VERSION_HEX >= 0x030C0000 && !CYTHON_USE_MODULE_STATE && CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
+ #endif
+#endif
+#ifndef CYTHON_COMPRESS_STRINGS
+ #define CYTHON_COMPRESS_STRINGS 1
+#endif
+#ifndef CYTHON_FAST_PYCCALL
+#define CYTHON_FAST_PYCCALL CYTHON_FAST_PYCALL
+#endif
+#ifndef CYTHON_VECTORCALL
+#if CYTHON_COMPILING_IN_LIMITED_API
+#define CYTHON_VECTORCALL (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
+#else
+#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL)
+#endif
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+ #if defined(__cplusplus)
+ /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
+ * but leads to warnings with -pedantic, since it is a C++17 feature */
+ #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
+ #if __has_cpp_attribute(maybe_unused)
+ #define CYTHON_UNUSED [[maybe_unused]]
+ #endif
+ #endif
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+ #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_USE_CPP_STD_MOVE
+ #if defined(__cplusplus) && (\
+ __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600))
+ #define CYTHON_USE_CPP_STD_MOVE 1
+ #else
+ #define CYTHON_USE_CPP_STD_MOVE 0
+ #endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#include <stdint.h>
+typedef uintptr_t __pyx_uintptr_t;
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus)
+ /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
+ * but leads to warnings with -pedantic, since it is a C++17 feature */
+ #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+#ifndef Py_UNREACHABLE
+ #define Py_UNREACHABLE() assert(0); abort()
+#endif
+#ifdef __cplusplus
+ template <typename T>
+ struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
+ #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value)
+#else
+ #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
+#endif
+#if CYTHON_COMPILING_IN_PYPY == 1
+ #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX < 0x030A0000)
+#else
+ #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX < 0x03090000)
+#endif
+#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))
+
+/* CInitCode */
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* PythonCompatibility */
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#define __Pyx_DefaultClassType PyType_Type
+#if CYTHON_COMPILING_IN_LIMITED_API
+ #ifndef CO_OPTIMIZED
+ static int CO_OPTIMIZED;
+ #endif
+ #ifndef CO_NEWLOCALS
+ static int CO_NEWLOCALS;
+ #endif
+ #ifndef CO_VARARGS
+ static int CO_VARARGS;
+ #endif
+ #ifndef CO_VARKEYWORDS
+ static int CO_VARKEYWORDS;
+ #endif
+ #ifndef CO_ASYNC_GENERATOR
+ static int CO_ASYNC_GENERATOR;
+ #endif
+ #ifndef CO_GENERATOR
+ static int CO_GENERATOR;
+ #endif
+ #ifndef CO_COROUTINE
+ static int CO_COROUTINE;
+ #endif
+#else
+ #ifndef CO_COROUTINE
+ #define CO_COROUTINE 0x80
+ #endif
+ #ifndef CO_ASYNC_GENERATOR
+ #define CO_ASYNC_GENERATOR 0x200
+ #endif
+#endif
+static int __Pyx_init_co_variables(void);
+#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE)
+ #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type)
+#else
+ #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type))
+#endif
+#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is)
+ #define __Pyx_Py_Is(x, y) Py_Is(x, y)
+#else
+ #define __Pyx_Py_Is(x, y) ((x) == (y))
+#endif
+#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone)
+ #define __Pyx_Py_IsNone(ob) Py_IsNone(ob)
+#else
+ #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None)
+#endif
+#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue)
+ #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob)
+#else
+ #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True)
+#endif
+#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse)
+ #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob)
+#else
+ #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False)
+#endif
+#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? NULL : (obj))
+#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o)
+#else
+ #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o)
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef Py_TPFLAGS_SEQUENCE
+ #define Py_TPFLAGS_SEQUENCE 0
+#endif
+#ifndef Py_TPFLAGS_MAPPING
+ #define Py_TPFLAGS_MAPPING 0
+#endif
+#ifndef Py_TPFLAGS_IMMUTABLETYPE
+ #define Py_TPFLAGS_IMMUTABLETYPE (1UL << 8)
+#endif
+#ifndef Py_TPFLAGS_DISALLOW_INSTANTIATION
+ #define Py_TPFLAGS_DISALLOW_INSTANTIATION (1UL << 7)
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#ifndef METH_FASTCALL
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #if PY_VERSION_HEX >= 0x030d00A4
+ # define __Pyx_PyCFunctionFast PyCFunctionFast
+ # define __Pyx_PyCFunctionFastWithKeywords PyCFunctionFastWithKeywords
+ #else
+ # define __Pyx_PyCFunctionFast _PyCFunctionFast
+ # define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+ #endif
+#endif
+#if CYTHON_METH_FASTCALL
+ #define __Pyx_METH_FASTCALL METH_FASTCALL
+ #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast
+ #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords
+#else
+ #define __Pyx_METH_FASTCALL METH_VARARGS
+ #define __Pyx_PyCFunction_FastCall PyCFunction
+ #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords
+#endif
+#if CYTHON_VECTORCALL
+ #define __pyx_vectorcallfunc vectorcallfunc
+ #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET
+ #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n))
+#else
+ #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0
+ #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n))
+#endif
+#if PY_VERSION_HEX >= 0x030900B1
+#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_CheckExact(func)
+#else
+#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_Check(func)
+#endif
+#define __Pyx_CyOrPyCFunction_Check(func) PyCFunction_Check(func)
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) (((PyCFunctionObject*)(func))->m_ml->ml_meth)
+#elif !CYTHON_COMPILING_IN_LIMITED_API
+#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) PyCFunction_GET_FUNCTION(func)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_CyOrPyCFunction_GET_FLAGS(func) (((PyCFunctionObject*)(func))->m_ml->ml_flags)
+static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) {
+ return (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC) ? NULL : ((PyCFunctionObject*)func)->m_self;
+}
+#endif
+static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void (*cfunc)(void)) {
+#if CYTHON_COMPILING_IN_LIMITED_API
+ return PyCFunction_Check(func) && PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
+#else
+ return PyCFunction_Check(func) && PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
+#endif
+}
+#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCFunction(func, cfunc)
+#if PY_VERSION_HEX < 0x03090000 || (CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
+ #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b))
+ typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
+#else
+ #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b)
+ #define __Pyx_PyCMethod PyCMethod
+#endif
+#ifndef METH_METHOD
+ #define METH_METHOD 0x200
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
+#elif CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) GraalPyFrame_SetLineNumber((frame), (lineno))
+#elif CYTHON_COMPILING_IN_GRAAL
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) _PyFrame_SetLineNumber((frame), (lineno))
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API
+ #define __Pyx_PyThreadState_Current PyThreadState_Get()
+#elif !CYTHON_FAST_THREAD_STATE
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x030d00A1
+ #define __Pyx_PyThreadState_Current PyThreadState_GetUnchecked()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#endif
+#if CYTHON_USE_MODULE_STATE
+static CYTHON_INLINE void *__Pyx__PyModule_GetState(PyObject *op)
+{
+ void *result;
+ result = PyModule_GetState(op);
+ if (!result)
+ Py_FatalError("Couldn't find the module state");
+ return result;
+}
+#define __Pyx_PyModule_GetState(o) (__pyx_mstatetype *)__Pyx__PyModule_GetState(o)
+#else
+#define __Pyx_PyModule_GetState(op) ((void)op,__pyx_mstate_global)
+#endif
+#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE((PyObject *) obj), name, func_ctype)
+#define __Pyx_PyObject_TryGetSlot(obj, name, func_ctype) __Pyx_PyType_TryGetSlot(Py_TYPE(obj), name, func_ctype)
+#define __Pyx_PyObject_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
+#define __Pyx_PyObject_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
+#if CYTHON_USE_TYPE_SLOTS
+ #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name)
+ #define __Pyx_PyType_TryGetSlot(type, name, func_ctype) __Pyx_PyType_GetSlot(type, name, func_ctype)
+ #define __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype) (((type)->sub) ? ((type)->sub->name) : NULL)
+ #define __Pyx_PyType_TryGetSubSlot(type, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype)
+#else
+ #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name))
+ #define __Pyx_PyType_TryGetSlot(type, name, func_ctype)\
+ ((__PYX_LIMITED_VERSION_HEX >= 0x030A0000 ||\
+ (PyType_GetFlags(type) & Py_TPFLAGS_HEAPTYPE) || __Pyx_get_runtime_version() >= 0x030A0000) ?\
+ __Pyx_PyType_GetSlot(type, name, func_ctype) : NULL)
+ #define __Pyx_PyType_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSlot(obj, name, func_ctype)
+ #define __Pyx_PyType_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSlot(obj, name, func_ctype)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) {
+ PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name);
+ if (res == NULL) PyErr_Clear();
+ return res;
+}
+#elif !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000
+#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError
+#define __Pyx_PyDict_GetItemStr PyDict_GetItem
+#else
+static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) {
+#if CYTHON_COMPILING_IN_PYPY
+ return PyDict_GetItem(dict, name);
+#else
+ PyDictEntry *ep;
+ PyDictObject *mp = (PyDictObject*) dict;
+ long hash = ((PyStringObject *) name)->ob_shash;
+ assert(hash != -1);
+ ep = (mp->ma_lookup)(mp, name, hash);
+ if (ep == NULL) {
+ return NULL;
+ }
+ return ep->me_value;
+#endif
+}
+#define __Pyx_PyDict_GetItemStr PyDict_GetItem
+#endif
+#if CYTHON_USE_TYPE_SLOTS
+ #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags)
+ #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0)
+#else
+ #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp))
+ #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature)
+#endif
+#define __Pyx_PyObject_GetIterNextFunc(iterator) __Pyx_PyObject_GetSlot(iterator, tp_iternext, iternextfunc)
+#if CYTHON_USE_TYPE_SPECS
+#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\
+ PyTypeObject *type = Py_TYPE((PyObject*)obj);\
+ assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\
+ PyObject_GC_Del(obj);\
+ Py_DECREF(type);\
+}
+#else
+#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj)
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U)
+ #define __Pyx_PyUnicode_KIND(u) ((void)u, (0))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)u)
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i))
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u))
+#else
+ #if PY_VERSION_HEX >= 0x030C0000
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #else
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #endif
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u))
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch)
+ #if PY_VERSION_HEX >= 0x030C0000
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #else
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #endif
+ #endif
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #if !defined(PyUnicode_DecodeUnicodeEscape)
+ #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors)
+ #endif
+ #if !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+ #endif
+ #if !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+ #endif
+ #if !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+ #endif
+#endif
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
+ #define __Pyx_PySequence_ListKeepNew(obj)\
+ (likely(PyList_CheckExact(obj) && PyUnstable_Object_IsUniquelyReferenced(obj)) ? __Pyx_NewRef(obj) : PySequence_List(obj))
+#elif CYTHON_COMPILING_IN_CPYTHON
+ #define __Pyx_PySequence_ListKeepNew(obj)\
+ (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj))
+#else
+ #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj)
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+enum __Pyx_ReferenceSharing {
+ __Pyx_ReferenceSharing_DefinitelyUnique, // We created it so we know it's unshared - no need to check
+ __Pyx_ReferenceSharing_OwnStrongReference,
+ __Pyx_ReferenceSharing_FunctionArgument,
+ __Pyx_ReferenceSharing_SharedReference, // Never trust it to be unshared because it's a global or similar
+};
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && PY_VERSION_HEX >= 0x030E0000
+#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing)\
+ (sharing == __Pyx_ReferenceSharing_DefinitelyUnique ? 1 :\
+ (sharing == __Pyx_ReferenceSharing_FunctionArgument ? PyUnstable_Object_IsUniqueReferencedTemporary(o) :\
+ (sharing == __Pyx_ReferenceSharing_OwnStrongReference ? PyUnstable_Object_IsUniquelyReferenced(o) : 0)))
+#elif (CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING) || CYTHON_COMPILING_IN_LIMITED_API
+#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)sharing), Py_REFCNT(o) == 1)
+#else
+#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)o), ((void)sharing), 0)
+#endif
+#if CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+ #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
+ #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
+ #elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PyList_GetItemRef(o, i) (likely((i) >= 0) ? PySequence_GetItem(o, i) : (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+ #else
+ #define __Pyx_PyList_GetItemRef(o, i) PySequence_ITEM(o, i)
+ #endif
+#elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
+ #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
+ #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
+ #else
+ #define __Pyx_PyList_GetItemRef(o, i) __Pyx_XNewRef(PyList_GetItem(o, i))
+ #endif
+#else
+ #define __Pyx_PyList_GetItemRef(o, i) __Pyx_NewRef(PyList_GET_ITEM(o, i))
+#endif
+#if CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS && !CYTHON_COMPILING_IN_LIMITED_API && CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) (__Pyx_IS_UNIQUELY_REFERENCED(o, unsafe_shared) ?\
+ __Pyx_NewRef(PyList_GET_ITEM(o, i)) : __Pyx_PyList_GetItemRef(o, i))
+#else
+ #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) __Pyx_PyList_GetItemRef(o, i)
+#endif
+#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
+#define __Pyx_PyDict_GetItemRef(dict, key, result) PyDict_GetItemRef(dict, key, result)
+#elif CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
+static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
+ *result = PyObject_GetItem(dict, key);
+ if (*result == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_KeyError)) {
+ PyErr_Clear();
+ return 0;
+ }
+ return -1;
+ }
+ return 1;
+}
+#else
+static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
+ *result = PyDict_GetItemWithError(dict, key);
+ if (*result == NULL) {
+ return PyErr_Occurred() ? -1 : 0;
+ }
+ Py_INCREF(*result);
+ return 1;
+}
+#endif
+#if defined(CYTHON_DEBUG_VISIT_CONST) && CYTHON_DEBUG_VISIT_CONST
+ #define __Pyx_VISIT_CONST(obj) Py_VISIT(obj)
+#else
+ #define __Pyx_VISIT_CONST(obj)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i)
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+ #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0))
+ #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GET_ITEM(o, i)
+ #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0))
+ #define __Pyx_PyList_GET_ITEM(o, i) PyList_GET_ITEM(o, i)
+#else
+ #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i)
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+ #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v)
+ #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GetItem(o, i)
+ #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v)
+ #define __Pyx_PyList_GET_ITEM(o, i) PyList_GetItem(o, i)
+#endif
+#if CYTHON_ASSUME_SAFE_SIZE
+ #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o)
+ #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o)
+ #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o)
+ #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
+ #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o)
+ #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GET_LENGTH(o)
+#else
+ #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o)
+ #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o)
+ #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o)
+ #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o)
+ #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o)
+ #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GetLength(o)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_InternFromString)
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+#endif
+#define __Pyx_PyLong_FromHash_t PyLong_FromSsize_t
+#define __Pyx_PyLong_AsHash_t __Pyx_PyIndex_AsSsize_t
+#if __PYX_LIMITED_VERSION_HEX >= 0x030A0000
+ #define __Pyx_PySendResult PySendResult
+#else
+ typedef enum {
+ PYGEN_RETURN = 0,
+ PYGEN_ERROR = -1,
+ PYGEN_NEXT = 1,
+ } __Pyx_PySendResult;
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX < 0x030A00A3
+ typedef __Pyx_PySendResult (*__Pyx_pyiter_sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
+#else
+ #define __Pyx_pyiter_sendfunc sendfunc
+#endif
+#if !CYTHON_USE_AM_SEND
+#define __PYX_HAS_PY_AM_SEND 0
+#elif __PYX_LIMITED_VERSION_HEX >= 0x030A0000
+#define __PYX_HAS_PY_AM_SEND 1
+#else
+#define __PYX_HAS_PY_AM_SEND 2 // our own backported implementation
+#endif
+#if __PYX_HAS_PY_AM_SEND < 2
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+#else
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ __Pyx_pyiter_sendfunc am_send;
+ } __Pyx_PyAsyncMethodsStruct;
+ #define __Pyx_SlotTpAsAsync(s) ((PyAsyncMethods*)(s))
+#endif
+#if CYTHON_USE_AM_SEND && PY_VERSION_HEX < 0x030A00F0
+ #define __Pyx_TPFLAGS_HAVE_AM_SEND (1UL << 21)
+#else
+ #define __Pyx_TPFLAGS_HAVE_AM_SEND (0)
+#endif
+#if PY_VERSION_HEX >= 0x03090000
+#define __Pyx_PyInterpreterState_Get() PyInterpreterState_Get()
+#else
+#define __Pyx_PyInterpreterState_Get() PyThreadState_Get()->interp
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030A0000
+#ifdef __cplusplus
+extern "C"
+#endif
+PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
+#endif
+#if CYTHON_COMPILING_IN_LIMITED_API
+static int __Pyx_init_co_variable(PyObject *inspect, const char* name, int *write_to) {
+ int value;
+ PyObject *py_value = PyObject_GetAttrString(inspect, name);
+ if (!py_value) return 0;
+ value = (int) PyLong_AsLong(py_value);
+ Py_DECREF(py_value);
+ *write_to = value;
+ return value != -1 || !PyErr_Occurred();
+}
+static int __Pyx_init_co_variables(void) {
+ PyObject *inspect;
+ int result;
+ inspect = PyImport_ImportModule("inspect");
+ result =
+#if !defined(CO_OPTIMIZED)
+ __Pyx_init_co_variable(inspect, "CO_OPTIMIZED", &CO_OPTIMIZED) &&
+#endif
+#if !defined(CO_NEWLOCALS)
+ __Pyx_init_co_variable(inspect, "CO_NEWLOCALS", &CO_NEWLOCALS) &&
+#endif
+#if !defined(CO_VARARGS)
+ __Pyx_init_co_variable(inspect, "CO_VARARGS", &CO_VARARGS) &&
+#endif
+#if !defined(CO_VARKEYWORDS)
+ __Pyx_init_co_variable(inspect, "CO_VARKEYWORDS", &CO_VARKEYWORDS) &&
+#endif
+#if !defined(CO_ASYNC_GENERATOR)
+ __Pyx_init_co_variable(inspect, "CO_ASYNC_GENERATOR", &CO_ASYNC_GENERATOR) &&
+#endif
+#if !defined(CO_GENERATOR)
+ __Pyx_init_co_variable(inspect, "CO_GENERATOR", &CO_GENERATOR) &&
+#endif
+#if !defined(CO_COROUTINE)
+ __Pyx_init_co_variable(inspect, "CO_COROUTINE", &CO_COROUTINE) &&
+#endif
+ 1;
+ Py_DECREF(inspect);
+ return result ? 0 : -1;
+}
+#else
+static int __Pyx_init_co_variables(void) {
+ return 0; // It's a limited API-only feature
+}
+#endif
+
+/* MathInitCode */
+#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
+ #ifndef _USE_MATH_DEFINES
+ #define _USE_MATH_DEFINES
+ #endif
+#endif
+#include <math.h>
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#ifndef CYTHON_CLINE_IN_TRACEBACK_RUNTIME
+#define CYTHON_CLINE_IN_TRACEBACK_RUNTIME 0
+#endif
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+#define CYTHON_CLINE_IN_TRACEBACK CYTHON_CLINE_IN_TRACEBACK_RUNTIME
+#endif
+#if CYTHON_CLINE_IN_TRACEBACK
+#define __PYX_MARK_ERR_POS(f_index, lineno) { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; __pyx_clineno = __LINE__; (void) __pyx_clineno; }
+#else
+#define __PYX_MARK_ERR_POS(f_index, lineno) { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; (void) __pyx_clineno; }
+#endif
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifdef CYTHON_EXTERN_C
+ #undef __PYX_EXTERN_C
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
+#elif defined(__PYX_EXTERN_C)
+ #ifdef _MSC_VER
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
+ #else
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
+ #endif
+#else
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__fontTools__cu2qu__cu2qu
+#define __PYX_HAVE_API__fontTools__cu2qu__cu2qu
+/* Early includes */
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char*);
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+ #define __Pyx_PyByteArray_AsString(s) PyByteArray_AS_STRING(s)
+#else
+ #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AsString(s))
+ #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AsString(s))
+ #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AsString(s))
+ #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AsString(s))
+ #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AsString(s))
+ #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AsString(s))
+ #define __Pyx_PyByteArray_AsString(s) PyByteArray_AsString(s)
+#endif
+#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o)
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+static CYTHON_INLINE PyObject *__Pyx_NewRef(PyObject *obj) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_NewRef)
+ return Py_NewRef(obj);
+#else
+ Py_INCREF(obj);
+ return obj;
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_XNewRef(PyObject *obj) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_XNewRef)
+ return Py_XNewRef(obj);
+#else
+ Py_XINCREF(obj);
+ return obj;
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b);
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t);
+static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __Pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AS_DOUBLE(x)
+#else
+#define __Pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AsDouble(x)
+#endif
+#define __Pyx_PyFloat_AsFloat(x) ((float) __Pyx_PyFloat_AsDouble(x))
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#if CYTHON_USE_PYLONG_INTERNALS
+ #if PY_VERSION_HEX >= 0x030C00A7
+ #ifndef _PyLong_SIGN_MASK
+ #define _PyLong_SIGN_MASK 3
+ #endif
+ #ifndef _PyLong_NON_SIZE_BITS
+ #define _PyLong_NON_SIZE_BITS 3
+ #endif
+ #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK)
+ #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0)
+ #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x))
+ #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1)
+ #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0)
+ #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0])
+ #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS))
+ #define __Pyx_PyLong_SignedDigitCount(x)\
+ ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x))
+ #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue)
+ #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x)
+ #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x)
+ #else
+ #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS))
+ #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0])
+ #endif
+ typedef Py_ssize_t __Pyx_compact_pylong;
+ typedef size_t __Pyx_compact_upylong;
+ #else
+ #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0)
+ #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0)
+ #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0)
+ #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0)
+ #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0])
+ #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x))
+ #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x)
+ #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1)
+ #define __Pyx_PyLong_CompactValue(x)\
+ ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? -(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
+ typedef sdigit __Pyx_compact_pylong;
+ typedef digit __Pyx_compact_upylong;
+ #endif
+ #if PY_VERSION_HEX >= 0x030C00A5
+ #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit)
+ #else
+ #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit)
+ #endif
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
+ #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#elif __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeASCII(c_str, size, NULL)
+#else
+ #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+/* PretendToInitialize */
+#ifdef __cplusplus
+#if __cplusplus > 201103L
+#include <type_traits>
+#endif
+template <typename T>
+static void __Pyx_pretend_to_initialize(T* ptr) {
+#if __cplusplus > 201103L
+ if ((std::is_trivially_default_constructible<T>::value))
+#endif
+ *ptr = T();
+ (void)ptr;
+}
+#else
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+#endif
+
+
+#if !CYTHON_USE_MODULE_STATE
+static PyObject *__pyx_m = NULL;
+#endif
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * const __pyx_cfilenm = __FILE__;
+static const char *__pyx_filename;
+
+/* Header.proto */
+#if !defined(CYTHON_CCOMPLEX)
+ #if defined(__cplusplus)
+ #define CYTHON_CCOMPLEX 1
+ #elif (defined(_Complex_I) && !defined(_MSC_VER)) || ((defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_COMPLEX__) && !defined(_MSC_VER))
+ #define CYTHON_CCOMPLEX 1
+ #else
+ #define CYTHON_CCOMPLEX 0
+ #endif
+#endif
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ #include <complex>
+ #else
+ #include <complex.h>
+ #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+ #undef _Complex_I
+ #define _Complex_I 1.0fj
+#endif
+
+/* #### Code section: filename_table ### */
+
+static const char* const __pyx_f[] = {
+ "Lib/fontTools/cu2qu/cu2qu.py",
+};
+/* #### Code section: utility_code_proto_before_types ### */
+/* Atomics.proto (used by UnpackUnboundCMethod) */
+#include <pythread.h>
+#ifndef CYTHON_ATOMICS
+ #define CYTHON_ATOMICS 1
+#endif
+#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
+#define __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+#define __pyx_atomic_int_type int
+#define __pyx_nonatomic_int_type int
+#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
+ (__STDC_VERSION__ >= 201112L) &&\
+ !defined(__STDC_NO_ATOMICS__))
+ #include <stdatomic.h>
+#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
+ (__cplusplus >= 201103L) ||\
+ (defined(_MSC_VER) && _MSC_VER >= 1700)))
+ #include <atomic>
+#endif
+#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
+ (__STDC_VERSION__ >= 201112L) &&\
+ !defined(__STDC_NO_ATOMICS__) &&\
+ ATOMIC_INT_LOCK_FREE == 2)
+ #undef __pyx_atomic_int_type
+ #define __pyx_atomic_int_type atomic_int
+ #define __pyx_atomic_ptr_type atomic_uintptr_t
+ #define __pyx_nonatomic_ptr_type uintptr_t
+ #define __pyx_atomic_incr_relaxed(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
+ #define __pyx_atomic_incr_acq_rel(value) atomic_fetch_add_explicit(value, 1, memory_order_acq_rel)
+ #define __pyx_atomic_decr_acq_rel(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
+ #define __pyx_atomic_sub(value, arg) atomic_fetch_sub(value, arg)
+ #define __pyx_atomic_int_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
+ #define __pyx_atomic_load(value) atomic_load(value)
+ #define __pyx_atomic_store(value, new_value) atomic_store(value, new_value)
+ #define __pyx_atomic_pointer_load_relaxed(value) atomic_load_explicit(value, memory_order_relaxed)
+ #define __pyx_atomic_pointer_load_acquire(value) atomic_load_explicit(value, memory_order_acquire)
+ #define __pyx_atomic_pointer_exchange(value, new_value) atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
+ #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
+ #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
+ #pragma message ("Using standard C atomics")
+ #elif defined(__PYX_DEBUG_ATOMICS)
+ #warning "Using standard C atomics"
+ #endif
+#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
+ (__cplusplus >= 201103L) ||\
+\
+ (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
+ ATOMIC_INT_LOCK_FREE == 2)
+ #undef __pyx_atomic_int_type
+ #define __pyx_atomic_int_type std::atomic_int
+ #define __pyx_atomic_ptr_type std::atomic_uintptr_t
+ #define __pyx_nonatomic_ptr_type uintptr_t
+ #define __pyx_atomic_incr_relaxed(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
+ #define __pyx_atomic_incr_acq_rel(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_acq_rel)
+ #define __pyx_atomic_decr_acq_rel(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
+ #define __pyx_atomic_sub(value, arg) std::atomic_fetch_sub(value, arg)
+ #define __pyx_atomic_int_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
+ #define __pyx_atomic_load(value) std::atomic_load(value)
+ #define __pyx_atomic_store(value, new_value) std::atomic_store(value, new_value)
+ #define __pyx_atomic_pointer_load_relaxed(value) std::atomic_load_explicit(value, std::memory_order_relaxed)
+ #define __pyx_atomic_pointer_load_acquire(value) std::atomic_load_explicit(value, std::memory_order_acquire)
+ #define __pyx_atomic_pointer_exchange(value, new_value) std::atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
+ #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
+ #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
+ #pragma message ("Using standard C++ atomics")
+ #elif defined(__PYX_DEBUG_ATOMICS)
+ #warning "Using standard C++ atomics"
+ #endif
+#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
+ (__GNUC_MINOR__ > 1 ||\
+ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
+ #define __pyx_atomic_ptr_type void*
+ #define __pyx_nonatomic_ptr_type void*
+ #define __pyx_atomic_incr_relaxed(value) __sync_fetch_and_add(value, 1)
+ #define __pyx_atomic_incr_acq_rel(value) __sync_fetch_and_add(value, 1)
+ #define __pyx_atomic_decr_acq_rel(value) __sync_fetch_and_sub(value, 1)
+ #define __pyx_atomic_sub(value, arg) __sync_fetch_and_sub(value, arg)
+ static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
+ __pyx_nonatomic_int_type old = __sync_val_compare_and_swap(value, *expected, desired);
+ int result = old == *expected;
+ *expected = old;
+ return result;
+ }
+ #define __pyx_atomic_load(value) __sync_fetch_and_add(value, 0)
+ #define __pyx_atomic_store(value, new_value) __sync_lock_test_and_set(value, new_value)
+ #define __pyx_atomic_pointer_load_relaxed(value) __sync_fetch_and_add(value, 0)
+ #define __pyx_atomic_pointer_load_acquire(value) __sync_fetch_and_add(value, 0)
+ #define __pyx_atomic_pointer_exchange(value, new_value) __sync_lock_test_and_set(value, (__pyx_atomic_ptr_type)new_value)
+ static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
+ __pyx_nonatomic_ptr_type old = __sync_val_compare_and_swap(value, *expected, desired);
+ int result = old == *expected;
+ *expected = old;
+ return result;
+ }
+ #ifdef __PYX_DEBUG_ATOMICS
+ #warning "Using GNU atomics"
+ #endif
+#elif CYTHON_ATOMICS && defined(_MSC_VER)
+ #include <intrin.h>
+ #undef __pyx_atomic_int_type
+ #define __pyx_atomic_int_type long
+ #define __pyx_atomic_ptr_type void*
+ #undef __pyx_nonatomic_int_type
+ #define __pyx_nonatomic_int_type long
+ #define __pyx_nonatomic_ptr_type void*
+ #pragma intrinsic (_InterlockedExchangeAdd, _InterlockedExchange, _InterlockedCompareExchange, _InterlockedCompareExchangePointer, _InterlockedExchangePointer)
+ #define __pyx_atomic_incr_relaxed(value) _InterlockedExchangeAdd(value, 1)
+ #define __pyx_atomic_incr_acq_rel(value) _InterlockedExchangeAdd(value, 1)
+ #define __pyx_atomic_decr_acq_rel(value) _InterlockedExchangeAdd(value, -1)
+ #define __pyx_atomic_sub(value, arg) _InterlockedExchangeAdd(value, -arg)
+ static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
+ __pyx_nonatomic_int_type old = _InterlockedCompareExchange(value, desired, *expected);
+ int result = old == *expected;
+ *expected = old;
+ return result;
+ }
+ #define __pyx_atomic_load(value) _InterlockedExchangeAdd(value, 0)
+ #define __pyx_atomic_store(value, new_value) _InterlockedExchange(value, new_value)
+ #define __pyx_atomic_pointer_load_relaxed(value) *(void * volatile *)value
+ #define __pyx_atomic_pointer_load_acquire(value) _InterlockedCompareExchangePointer(value, 0, 0)
+ #define __pyx_atomic_pointer_exchange(value, new_value) _InterlockedExchangePointer(value, (__pyx_atomic_ptr_type)new_value)
+ static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
+ __pyx_atomic_ptr_type old = _InterlockedCompareExchangePointer(value, desired, *expected);
+ int result = old == *expected;
+ *expected = old;
+ return result;
+ }
+ #ifdef __PYX_DEBUG_ATOMICS
+ #pragma message ("Using MSVC atomics")
+ #endif
+#else
+ #undef CYTHON_ATOMICS
+ #define CYTHON_ATOMICS 0
+ #ifdef __PYX_DEBUG_ATOMICS
+ #warning "Not using atomics"
+ #endif
+#endif
+
+/* CriticalSectionsDefinition.proto (used by CriticalSections) */
+#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+#define __Pyx_PyCriticalSection void*
+#define __Pyx_PyCriticalSection2 void*
+#define __Pyx_PyCriticalSection_End(cs)
+#define __Pyx_PyCriticalSection2_End(cs)
+#else
+#define __Pyx_PyCriticalSection PyCriticalSection
+#define __Pyx_PyCriticalSection2 PyCriticalSection2
+#define __Pyx_PyCriticalSection_End PyCriticalSection_End
+#define __Pyx_PyCriticalSection2_End PyCriticalSection2_End
+#endif
+
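+/* Note: on non-free-threading builds these critical-section shims are no-ops,
+ since the GIL already serializes access to Python objects; on free-threaded
+ CPython (Py_GIL_DISABLED) they map directly to the PyCriticalSection API. */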
+/* CriticalSections.proto (used by ParseKeywordsImpl) */ +#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING +#define __Pyx_PyCriticalSection_Begin(cs, arg) (void)(cs) +#define __Pyx_PyCriticalSection2_Begin(cs, arg1, arg2) (void)(cs) +#else +#define __Pyx_PyCriticalSection_Begin PyCriticalSection_Begin +#define __Pyx_PyCriticalSection2_Begin PyCriticalSection2_Begin +#endif +#if PY_VERSION_HEX < 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_BEGIN_CRITICAL_SECTION(o) { +#define __Pyx_END_CRITICAL_SECTION() } +#else +#define __Pyx_BEGIN_CRITICAL_SECTION Py_BEGIN_CRITICAL_SECTION +#define __Pyx_END_CRITICAL_SECTION Py_END_CRITICAL_SECTION +#endif + +/* IncludeStructmemberH.proto (used by FixUpExtensionType) */ +#include + +/* #### Code section: numeric_typedefs ### */ +/* #### Code section: complex_type_declarations ### */ +/* Declarations.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + +/* #### Code section: type_declarations ### */ + +/*--- Type declarations ---*/ +struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen; + +/* "fontTools/cu2qu/cu2qu.py":150 + * + * + * @cython.locals( # <<<<<<<<<<<<<< + * p0=cython.complex, + * p1=cython.complex, +*/ +struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen { + PyObject_HEAD + __pyx_t_double_complex __pyx_v_a; + __pyx_t_double_complex __pyx_v_a1; + __pyx_t_double_complex __pyx_v_b; + __pyx_t_double_complex __pyx_v_b1; + __pyx_t_double_complex __pyx_v_c; + __pyx_t_double_complex __pyx_v_c1; + __pyx_t_double_complex __pyx_v_d; + __pyx_t_double_complex __pyx_v_d1; + double __pyx_v_delta_2; + double __pyx_v_delta_3; + double __pyx_v_dt; + int __pyx_v_i; + int __pyx_v_n; + __pyx_t_double_complex __pyx_v_p0; + __pyx_t_double_complex __pyx_v_p1; + __pyx_t_double_complex __pyx_v_p2; + __pyx_t_double_complex __pyx_v_p3; + double __pyx_v_t1; + double __pyx_v_t1_2; + int __pyx_t_0; + int __pyx_t_1; + int __pyx_t_2; +}; + +/* #### Code section: utility_code_proto ### */ + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, Py_ssize_t); + void (*DECREF)(void*, PyObject*, Py_ssize_t); + void (*GOTREF)(void*, PyObject*, Py_ssize_t); + void (*GIVEREF)(void*, PyObject*, Py_ssize_t); + void* (*SetupContext)(const char*, Py_ssize_t, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ + } + #define __Pyx_RefNannyFinishContextNogil() {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __Pyx_RefNannyFinishContext();\ + 
+ PyGILState_Release(__pyx_gilstate_save);\
+ }
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+ #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContextNogil()
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_Py_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; Py_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+/* IncludeStdlibH.proto */
+#include <stdlib.h>
+
+/* PyObjectCall.proto (used by PyObjectFastCall) */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCallMethO.proto (used by PyObjectFastCall) */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectFastCall.proto */
+#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL)
+static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs);
+
+/* PyLongCompare.proto */
+static CYTHON_INLINE int __Pyx_PyLong_BoolEqObjC(PyObject *op1, PyObject *op2, long intval, long inplace);
+
+/* RaiseTooManyValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+/* RaiseNeedMoreValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+/* PyThreadStateGet.proto (used by PyErrFetchRestore) */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#if PY_VERSION_HEX >= 0x030C00A6
+#define __Pyx_PyErr_Occurred() 
(__pyx_tstate->current_exception != NULL) +#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL) +#else +#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL) +#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type) +#endif +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL) +#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto (used by IterFinish) */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6 +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* IterFinish.proto */ +static CYTHON_INLINE int __Pyx_IterFinish(void); + +/* UnpackItemEndCheck.proto */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck, unsafe_shared) :\ + (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck, unsafe_shared) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck, int unsafe_shared); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck, unsafe_shared) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck, int unsafe_shared); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck, int unsafe_shared); + +/* PyErrExceptionMatches.proto (used by PyObjectGetAttrStrNoError) */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyObjectGetAttrStr.proto (used by PyObjectGetAttrStrNoError) */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* PyObjectGetAttrStrNoError.proto (used by GetBuiltinName) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* GetBuiltinName.proto (used by GetModuleGlobalName) */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* PyDictVersioning.proto (used by GetModuleGlobalName) */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __Pyx_XNewRef(__pyx_dict_cached_value);\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); 
+#endif
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) do {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_mstate_global->__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+} while(0)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+} while(0)
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* TupleAndListFromArray.proto (used by fastcall) */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n);
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_METH_FASTCALL
+static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n);
+#endif
+
+/* IncludeStringH.proto (used by BytesEquals) */
+#include <string.h>
+
+/* BytesEquals.proto (used by UnicodeEquals) */
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* UnicodeEquals.proto (used by fastcall) */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* fastcall.proto */
+#if CYTHON_AVOID_BORROWED_REFS
+ #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_PySequence_ITEM(args, i)
+#elif CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_NewRef(__Pyx_PyTuple_GET_ITEM(args, i))
+#else
+ #define __Pyx_ArgRef_VARARGS(args, i) __Pyx_XNewRef(PyTuple_GetItem(args, i))
+#endif
+#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds)
+#define __Pyx_KwValues_VARARGS(args, nargs) NULL
+#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s)
+#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw)
+#if CYTHON_METH_FASTCALL
+ #define __Pyx_ArgRef_FASTCALL(args, i) __Pyx_NewRef(args[i])
+ #define __Pyx_NumKwargs_FASTCALL(kwds) __Pyx_PyTuple_GET_SIZE(kwds)
+ #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs))
+ static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s);
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API
+ CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues);
+ #else
+ #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw)
+ #endif
+#else
+ #define __Pyx_ArgRef_FASTCALL __Pyx_ArgRef_VARARGS
+ #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS
+ #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS
+ #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS
+ #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS
+#endif
+#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop)
+#if CYTHON_METH_FASTCALL || (CYTHON_COMPILING_IN_CPYTHON 
&& CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(args + start, stop - start) +#else +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) +#endif + +/* py_dict_items.proto (used by OwnedDictNext) */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); + +/* CallCFunction.proto (used by CallUnboundCMethod0) */ +#define __Pyx_CallCFunction(cfunc, self, args)\ + ((PyCFunction)(void(*)(void))(cfunc)->func)(self, args) +#define __Pyx_CallCFunctionWithKeywords(cfunc, self, args, kwargs)\ + ((PyCFunctionWithKeywords)(void(*)(void))(cfunc)->func)(self, args, kwargs) +#define __Pyx_CallCFunctionFast(cfunc, self, args, nargs)\ + ((__Pyx_PyCFunctionFast)(void(*)(void))(PyCFunction)(cfunc)->func)(self, args, nargs) +#define __Pyx_CallCFunctionFastWithKeywords(cfunc, self, args, nargs, kwnames)\ + ((__Pyx_PyCFunctionFastWithKeywords)(void(*)(void))(PyCFunction)(cfunc)->func)(self, args, nargs, kwnames) + +/* PyObjectCallOneArg.proto (used by CallUnboundCMethod0) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* UnpackUnboundCMethod.proto (used by CallUnboundCMethod0) */ +typedef struct { + PyObject *type; + PyObject **method_name; +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && CYTHON_ATOMICS + __pyx_atomic_int_type initialized; +#endif + PyCFunction func; + PyObject *method; + int flag; +} __Pyx_CachedCFunction; +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING +static CYTHON_INLINE int __Pyx_CachedCFunction_GetAndSetInitializing(__Pyx_CachedCFunction *cfunc) { +#if !CYTHON_ATOMICS + return 1; +#else + __pyx_nonatomic_int_type expected = 0; + if (__pyx_atomic_int_cmp_exchange(&cfunc->initialized, &expected, 1)) { + return 0; + } + return expected; +#endif +} +static CYTHON_INLINE void __Pyx_CachedCFunction_SetFinishedInitializing(__Pyx_CachedCFunction *cfunc) { +#if CYTHON_ATOMICS + __pyx_atomic_store(&cfunc->initialized, 2); +#endif +} +#else +#define __Pyx_CachedCFunction_GetAndSetInitializing(cfunc) 2 +#define __Pyx_CachedCFunction_SetFinishedInitializing(cfunc) +#endif + +/* CallUnboundCMethod0.proto */ +CYTHON_UNUSED +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); +#else +#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) +#endif + +/* py_dict_values.proto (used by OwnedDictNext) */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); + +/* OwnedDictNext.proto (used by ParseKeywordsImpl) */ +#if CYTHON_AVOID_BORROWED_REFS +static int __Pyx_PyDict_NextRef(PyObject *p, PyObject **ppos, PyObject **pkey, PyObject **pvalue); +#else +CYTHON_INLINE +static int __Pyx_PyDict_NextRef(PyObject *p, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue); +#endif + +/* RaiseDoubleKeywords.proto (used by ParseKeywordsImpl) */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywordsImpl.export */ +static int __Pyx_ParseKeywordsTuple( + PyObject *kwds, + PyObject * const *kwvalues, + PyObject ** const argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs +); +static int __Pyx_ParseKeywordDictToDict( + PyObject *kwds, + PyObject ** const argnames[], + 
PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name +); +static int __Pyx_ParseKeywordDict( + PyObject *kwds, + PyObject ** const argnames[], + PyObject *values[], + Py_ssize_t num_pos_args, + Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs +); + +/* CallUnboundCMethod2.proto */ +CYTHON_UNUSED +static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); +#else +#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2) +#endif + +/* ParseKeywords.proto */ +static CYTHON_INLINE int __Pyx_ParseKeywords( + PyObject *kwds, PyObject *const *kwvalues, PyObject ** const argnames[], + PyObject *kwds2, PyObject *values[], + Py_ssize_t num_pos_args, Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs +); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* GetException.proto (used by pep479) */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* pep479.proto */ +static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); + +/* GetTopmostException.proto (used by SaveResetException) */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyZeroDivisionError_Check.proto */ +#define __Pyx_PyExc_ZeroDivisionError_Check(obj) __Pyx_TypeCheck(obj, PyExc_ZeroDivisionError) + +/* IterNextPlain.proto (used by IterNext) */ +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next_Plain(PyObject *iterator); +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 +static PyObject *__Pyx_GetBuiltinNext_LimitedAPI(void); +#endif + +/* IterNext.proto */ +#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + #if 
CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + L->ob_item[len] = x; + #else + PyList_SET_ITEM(list, len, x); + #endif + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + L->ob_item[len] = x; + #else + PyList_SET_ITEM(list, len, x); + #endif + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyLongBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static CYTHON_INLINE PyObject* __Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyLong_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* RaiseException.export */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* AssertionsEnabled.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX >= 0x030C0000 + static int __pyx_assertions_enabled_flag; + #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag) + #if __clang__ || __GNUC__ + __attribute__((no_sanitize("thread"))) + #endif + static int __Pyx_init_assertions_enabled(void) { + PyObject *builtins, *debug, *debug_str; + int flag; + builtins = PyEval_GetBuiltins(); + if (!builtins) goto bad; + debug_str = PyUnicode_FromStringAndSize("__debug__", 9); + if (!debug_str) goto bad; + debug = PyObject_GetItem(builtins, debug_str); + Py_DECREF(debug_str); + if (!debug) goto bad; + flag = PyObject_IsTrue(debug); + Py_DECREF(debug); + if (flag == -1) goto bad; + __pyx_assertions_enabled_flag = flag; + return 0; + bad: + __pyx_assertions_enabled_flag = 1; + return -1; + } +#else + #define __Pyx_init_assertions_enabled() (0) + #define __pyx_assertions_enabled() (!Py_OptimizeFlag) +#endif + +/* PyAssertionError_Check.proto */ +#define __Pyx_PyExc_AssertionError_Check(obj) __Pyx_TypeCheck(obj, PyExc_AssertionError) + +/* SetItemInt.proto */ +#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck, has_gil, unsafe_shared)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck, unsafe_shared) :\ + (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ + __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, + int is_list, int wraparound, int boundscheck, int unsafe_shared); + +/* ModInt[long].proto */ +static CYTHON_INLINE long __Pyx_mod_long(long, long, int b_is_constant); + +/* CheckTypeForFreelists.proto */ +#if CYTHON_USE_FREELISTS +#if CYTHON_USE_TYPE_SPECS +#define __PYX_CHECK_FINAL_TYPE_FOR_FREELISTS(t, expected_tp, expected_size) ((int) ((t) == (expected_tp))) +#define __PYX_CHECK_TYPE_FOR_FREELIST_FLAGS Py_TPFLAGS_IS_ABSTRACT +#else +#define __PYX_CHECK_FINAL_TYPE_FOR_FREELISTS(t, expected_tp, expected_size) ((int) ((t)->tp_basicsize == (expected_size))) +#define __PYX_CHECK_TYPE_FOR_FREELIST_FLAGS (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE) +#endif +#define __PYX_CHECK_TYPE_FOR_FREELISTS(t, expected_tp, expected_size)\ + (__PYX_CHECK_FINAL_TYPE_FOR_FREELISTS((t), (expected_tp), (expected_size)) &\ + (int) (!__Pyx_PyType_HasFeature((t), __PYX_CHECK_TYPE_FOR_FREELIST_FLAGS))) +#endif + +/* AllocateExtensionType.proto */ +static PyObject *__Pyx_AllocateExtensionType(PyTypeObject *t, int is_final); + +/* LimitedApiGetTypeDict.proto (used by SetItemOnTypeDict) */ +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_GetTypeDict(PyTypeObject *tp); +#endif + +/* SetItemOnTypeDict.proto (used by FixUpExtensionType) */ +static int __Pyx__SetItemOnTypeDict(PyTypeObject *tp, PyObject *k, PyObject *v); +#define __Pyx_SetItemOnTypeDict(tp, k, v) __Pyx__SetItemOnTypeDict((PyTypeObject*)tp, k, v) + +/* FixUpExtensionType.proto */ +static CYTHON_INLINE int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); + +/* PyObjectCallNoArg.proto (used by PyObjectCallMethod0) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); + +/* PyObjectGetMethod.proto (used by PyObjectCallMethod0) */ +#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000))) +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); +#endif + +/* PyObjectCallMethod0.proto (used by PyType_Ready) */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); + +/* ValidateBasesTuple.proto (used by PyType_Ready) */ +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS +static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases); +#endif + +/* PyType_Ready.proto */ +CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t); + +/* HasAttr.proto (used by ImportImpl) */ +#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000 +#define __Pyx_HasAttr(o, n) PyObject_HasAttrWithError(o, n) +#else +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); +#endif + +/* ImportImpl.export */ +static PyObject *__Pyx__Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, PyObject *moddict, int level); + +/* Import.proto */ +static CYTHON_INLINE PyObject *__Pyx_Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* ListPack.proto */ +static PyObject *__Pyx_PyList_Pack(Py_ssize_t 
n, ...); + +/* pybytes_as_double.proto (used by pyunicode_as_double) */ +static double __Pyx_SlowPyString_AsDouble(PyObject *obj); +static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length); +static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE + as_c_string = PyBytes_AS_STRING(obj); + size = PyBytes_GET_SIZE(obj); +#else + if (PyBytes_AsStringAndSize(obj, &as_c_string, &size) < 0) { + return (double)-1; + } +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} +static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE + as_c_string = PyByteArray_AS_STRING(obj); + size = PyByteArray_GET_SIZE(obj); +#else + as_c_string = PyByteArray_AsString(obj); + if (as_c_string == NULL) { + return (double)-1; + } + size = PyByteArray_Size(obj); +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} + +/* pyunicode_as_double.proto */ +#if !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS +static const char* __Pyx__PyUnicode_AsDouble_Copy(const void* data, const int kind, char* buffer, Py_ssize_t start, Py_ssize_t end) { + int last_was_punctuation; + Py_ssize_t i; + last_was_punctuation = 1; + for (i=start; i <= end; i++) { + Py_UCS4 chr = PyUnicode_READ(kind, data, i); + int is_punctuation = (chr == '_') | (chr == '.'); + *buffer = (char)chr; + buffer += (chr != '_'); + if (unlikely(chr > 127)) goto parse_failure; + if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; + last_was_punctuation = is_punctuation; + } + if (unlikely(last_was_punctuation)) goto parse_failure; + *buffer = '\0'; + return buffer; +parse_failure: + return NULL; +} +static double __Pyx__PyUnicode_AsDouble_inf_nan(const void* data, int kind, Py_ssize_t start, Py_ssize_t length) { + int matches = 1; + Py_UCS4 chr; + Py_UCS4 sign = PyUnicode_READ(kind, data, start); + int is_signed = (sign == '-') | (sign == '+'); + start += is_signed; + length -= is_signed; + switch (PyUnicode_READ(kind, data, start)) { + #ifdef Py_NAN + case 'n': + case 'N': + if (unlikely(length != 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'a') | (chr == 'A'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'n') | (chr == 'N'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? -Py_NAN : Py_NAN; + #endif + case 'i': + case 'I': + if (unlikely(length < 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'f') | (chr == 'F'); + if (likely(length == 3 && matches)) + return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; + if (unlikely(length != 8)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+3); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+4); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+5); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+6); + matches &= (chr == 't') | (chr == 'T'); + chr = PyUnicode_READ(kind, data, start+7); + matches &= (chr == 'y') | (chr == 'Y'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; + case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': + break; + default: + goto parse_failure; + } + return 0.0; +parse_failure: + return -1.0; +} +static double __Pyx_PyUnicode_AsDouble_WithSpaces(PyObject *obj) { + double value; + const char *last; + char *end; + Py_ssize_t start, length = PyUnicode_GET_LENGTH(obj); + const int kind = PyUnicode_KIND(obj); + const void* data = PyUnicode_DATA(obj); + start = 0; + while (Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, start))) + start++; + while (start < length - 1 && Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, length - 1))) + length--; + length -= start; + if (unlikely(length <= 0)) goto fallback; + value = __Pyx__PyUnicode_AsDouble_inf_nan(data, kind, start, length); + if (unlikely(value == -1.0)) goto fallback; + if (value != 0.0) return value; + if (length < 40) { + char number[40]; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) goto fallback; + value = PyOS_string_to_double(number, &end, NULL); + } else { + char *number = (char*) PyMem_Malloc((length + 1) * sizeof(char)); + if (unlikely(!number)) goto fallback; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) { + PyMem_Free(number); + goto fallback; + } + value = PyOS_string_to_double(number, &end, NULL); + PyMem_Free(number); + } + if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { + return value; + } +fallback: + return __Pyx_SlowPyString_AsDouble(obj); +} +#endif +static CYTHON_INLINE double __Pyx_PyUnicode_AsDouble(PyObject *obj) { +#if !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS + if (unlikely(__Pyx_PyUnicode_READY(obj) == -1)) + return (double)-1; + if (likely(PyUnicode_IS_ASCII(obj))) { + const char *s; + Py_ssize_t length; + s = PyUnicode_AsUTF8AndSize(obj, &length); + return __Pyx__PyBytes_AsDouble(obj, s, length); + } + return __Pyx_PyUnicode_AsDouble_WithSpaces(obj); +#else + return __Pyx_SlowPyString_AsDouble(obj); +#endif +} + +/* FloatExceptionCheck.proto */ +#define __PYX_CHECK_FLOAT_EXCEPTION(value, error_value)\ + ((error_value) == (error_value) ?\ + (value) == (error_value) :\ + (value) != (value)) + +/* dict_setdefault.proto (used by FetchCommonType) */ +static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value); + +/* AddModuleRef.proto (used by FetchSharedCythonModule) */ +#if ((CYTHON_COMPILING_IN_CPYTHON_FREETHREADING ) ||\ + __PYX_LIMITED_VERSION_HEX < 0x030d0000) + static PyObject *__Pyx_PyImport_AddModuleRef(const char *name); +#else + #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name) +#endif + +/* FetchSharedCythonModule.proto (used by FetchCommonType) */ +static PyObject *__Pyx_FetchSharedCythonABIModule(void); + +/* FetchCommonType.proto (used by CommonTypesMetaclass) */ +static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases); + +/* CommonTypesMetaclass.proto (used by CythonFunctionShared) */ +static int __pyx_CommonTypesMetaclass_init(PyObject *module); +#define __Pyx_CommonTypesMetaclass_USED + +/* CallTypeTraverse.proto (used by CythonFunctionShared) */ +#if !CYTHON_USE_TYPE_SPECS || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x03090000) +#define __Pyx_call_type_traverse(o, always_call, visit, arg) 0 +#else +static int __Pyx_call_type_traverse(PyObject *o, int 
always_call, visitproc visit, void *arg); +#endif + +/* PyMethodNew.proto (used by CythonFunctionShared) */ +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ); + +/* PyVectorcallFastCallDict.proto (used by CythonFunctionShared) */ +#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); +#endif + +/* CythonFunctionShared.proto (used by CythonFunction) */ +#define __Pyx_CyFunction_USED +#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 +#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 +#define __Pyx_CYFUNCTION_CCLASS 0x04 +#define __Pyx_CYFUNCTION_COROUTINE 0x08 +#define __Pyx_CyFunction_GetClosure(f)\ + (((__pyx_CyFunctionObject *) (f))->func_closure) +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_CyFunction_GetClassObj(f)\ + (((__pyx_CyFunctionObject *) (f))->func_classobj) +#else + #define __Pyx_CyFunction_GetClassObj(f)\ + ((PyObject*) ((PyCMethodObject *) (f))->mm_class) +#endif +#define __Pyx_CyFunction_SetClassObj(f, classobj)\ + __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) +#define __Pyx_CyFunction_Defaults(type, f)\ + ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) +#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ + ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) +typedef struct { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject_HEAD + PyObject *func; +#elif PY_VERSION_HEX < 0x030900B1 + PyCFunctionObject func; +#else + PyCMethodObject func; +#endif +#if CYTHON_COMPILING_IN_LIMITED_API && CYTHON_METH_FASTCALL + __pyx_vectorcallfunc func_vectorcall; +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_weakreflist; +#endif +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_dict; +#endif + PyObject *func_name; + PyObject *func_qualname; + PyObject *func_doc; + PyObject *func_globals; + PyObject *func_code; + PyObject *func_closure; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_classobj; +#endif + PyObject *defaults; + int flags; + PyObject *defaults_tuple; + PyObject *defaults_kwdict; + PyObject *(*defaults_getter)(PyObject *); + PyObject *func_annotations; + PyObject *func_is_coroutine; +} __pyx_CyFunctionObject; +#undef __Pyx_CyOrPyCFunction_Check +#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_mstate_global->__pyx_CyFunctionType) +#define __Pyx_CyOrPyCFunction_Check(obj) __Pyx_TypeCheck2(obj, __pyx_mstate_global->__pyx_CyFunctionType, &PyCFunction_Type) +#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_mstate_global->__pyx_CyFunctionType) +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void)); +#undef __Pyx_IsSameCFunction +#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCyOrCFunction(func, cfunc) +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); +static CYTHON_INLINE PyObject *__Pyx_CyFunction_InitDefaults(PyObject *func, + PyTypeObject *defaults_type); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, + PyObject *tuple); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, + PyObject 
*dict); +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, + PyObject *dict); +static int __pyx_CyFunction_init(PyObject *module); +#if CYTHON_METH_FASTCALL +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +#if CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) +#else +#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) +#endif +#endif + +/* CythonFunction.proto */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +/* CLineInTraceback.proto (used by AddTraceback) */ +#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#else +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#endif + +/* CodeObjectCache.proto (used by AddTraceback) */ +#if CYTHON_COMPILING_IN_LIMITED_API +typedef PyObject __Pyx_CachedCodeObjectType; +#else +typedef PyCodeObject __Pyx_CachedCodeObjectType; +#endif +typedef struct { + __Pyx_CachedCodeObjectType* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; + #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + __pyx_atomic_int_type accessor_count; + #endif +}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define 
__Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* FromPy.proto */ +static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject*); + +/* GCCDiagnostics.proto */ +#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* ToPy.proto */ +#define __pyx_PyComplex_FromComplex(z)\ + PyComplex_FromDoubles((double)__Pyx_CREAL(z),\ + (double)__Pyx_CIMAG(z)) + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *); + +/* PyObjectVectorCallKwBuilder.proto (used by CIntToPy) */ +CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n); +#if CYTHON_VECTORCALL +#if PY_VERSION_HEX >= 0x03090000 +#define __Pyx_Object_Vectorcall_CallFromBuilder PyObject_Vectorcall +#else +#define __Pyx_Object_Vectorcall_CallFromBuilder _PyObject_Vectorcall +#endif +#define __Pyx_MakeVectorcallBuilderKwds(n) PyTuple_New(n) +static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n); +static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n); +#else +#define __Pyx_Object_Vectorcall_CallFromBuilder __Pyx_PyObject_FastCallDict +#define __Pyx_MakeVectorcallBuilderKwds(n) __Pyx_PyDict_NewPresized(n) +#define __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n) PyDict_SetItem(builder, key, value) +#define __Pyx_VectorcallBuilder_AddArgStr(key, value, builder, args, n) PyDict_SetItemString(builder, key, value) +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyLong_From_int(int value); + +/* FormatTypeName.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API +typedef PyObject *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%U" +#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) +#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000 +#define __Pyx_PyType_GetFullyQualifiedName PyType_GetFullyQualifiedName +#else +static __Pyx_TypeName 
__Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp); +#endif +#else // !LIMITED_API +typedef const char *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%.200s" +#define __Pyx_PyType_GetFullyQualifiedName(tp) ((tp)->tp_name) +#define __Pyx_DECREF_TypeName(obj) +#endif + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2) { + return PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2); +} +#endif +#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2) +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#ifdef PyExceptionInstance_Check + #define __Pyx_PyBaseException_Check(obj) PyExceptionInstance_Check(obj) +#else + #define __Pyx_PyBaseException_Check(obj) __Pyx_TypeCheck(obj, PyExc_BaseException) +#endif + +/* GetRuntimeVersion.proto */ +#if __PYX_LIMITED_VERSION_HEX < 0x030b0000 +static unsigned long __Pyx_cached_runtime_version = 0; +static void __Pyx_init_runtime_version(void); +#else +#define __Pyx_init_runtime_version() +#endif +static unsigned long __Pyx_get_runtime_version(void); + +/* SwapException.proto (used by CoroutineBase) */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* PyObjectCall2Args.proto (used by PyObjectCallMethod1) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethod1.proto (used by CoroutineBase) */ +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); + +/* ReturnWithStopIteration.proto (used by CoroutineBase) */ +static CYTHON_INLINE void __Pyx_ReturnWithStopIteration(PyObject* value, int async, int iternext); + +/* CoroutineBase.proto (used by Generator) */ +struct __pyx_CoroutineObject; +typedef PyObject *(*__pyx_coroutine_body_t)(struct __pyx_CoroutineObject *, PyThreadState *, PyObject *); +#if CYTHON_USE_EXC_INFO_STACK +#define __Pyx_ExcInfoStruct _PyErr_StackItem +#else +typedef struct { + PyObject *exc_type; + PyObject *exc_value; + PyObject *exc_traceback; +} __Pyx_ExcInfoStruct; 
+#endif +typedef struct __pyx_CoroutineObject { + PyObject_HEAD + __pyx_coroutine_body_t body; + PyObject *closure; + __Pyx_ExcInfoStruct gi_exc_state; +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *gi_weakreflist; +#endif + PyObject *classobj; + PyObject *yieldfrom; + __Pyx_pyiter_sendfunc yieldfrom_am_send; + PyObject *gi_name; + PyObject *gi_qualname; + PyObject *gi_modulename; + PyObject *gi_code; + PyObject *gi_frame; +#if CYTHON_USE_SYS_MONITORING && (CYTHON_PROFILE || CYTHON_TRACE) + PyMonitoringState __pyx_pymonitoring_state[__Pyx_MonitoringEventTypes_CyGen_count]; + uint64_t __pyx_pymonitoring_version; +#endif + int resume_label; + char is_running; +} __pyx_CoroutineObject; +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); +static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); +static int __Pyx_Coroutine_clear(PyObject *self); +static __Pyx_PySendResult __Pyx_Coroutine_AmSend(PyObject *self, PyObject *value, PyObject **retval); +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); +static __Pyx_PySendResult __Pyx_Coroutine_Close(PyObject *self, PyObject **retval); +static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); +#if CYTHON_USE_EXC_INFO_STACK +#define __Pyx_Coroutine_SwapException(self) +#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) +#else +#define __Pyx_Coroutine_SwapException(self) {\ + __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback);\ + __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state);\ + } +#define __Pyx_Coroutine_ResetAndClearException(self) {\ + __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback);\ + (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL;\ + } +#endif +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ + __Pyx_PyGen__FetchStopIterationValue(__pyx_tstate, pvalue) +#else +#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) +#endif +static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); +static char __Pyx_Coroutine_test_and_set_is_running(__pyx_CoroutineObject *gen); +static void __Pyx_Coroutine_unset_is_running(__pyx_CoroutineObject *gen); +static char __Pyx_Coroutine_get_is_running(__pyx_CoroutineObject *gen); +static PyObject *__Pyx_Coroutine_get_is_running_getter(PyObject *gen, void *closure); +#if __PYX_HAS_PY_AM_SEND == 2 +static void __Pyx_SetBackportTypeAmSend(PyTypeObject *type, __Pyx_PyAsyncMethodsStruct *static_amsend_methods, __Pyx_pyiter_sendfunc am_send); +#endif +static PyObject *__Pyx_Coroutine_fail_reduce_ex(PyObject *self, PyObject *arg); + +/* Generator.proto */ +#define __Pyx_Generator_USED +#define __Pyx_Generator_CheckExact(obj) __Pyx_IS_TYPE(obj, 
__pyx_mstate_global->__pyx_GeneratorType) +#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name)\ + __Pyx__Coroutine_New(__pyx_mstate_global->__pyx_GeneratorType, body, code, closure, name, qualname, module_name) +static PyObject *__Pyx_Generator_Next(PyObject *self); +static int __pyx_Generator_init(PyObject *module); +static CYTHON_INLINE PyObject *__Pyx_Generator_GetInlinedResult(PyObject *self); + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer); + +/* DecompressString.proto */ +static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo); + +/* MultiPhaseInitModuleState.proto */ +#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE +static PyObject *__Pyx_State_FindModule(void*); +static int __Pyx_State_AddModule(PyObject* module, void*); +static int __Pyx_State_RemoveModule(void*); +#elif CYTHON_USE_MODULE_STATE +#define __Pyx_State_FindModule PyState_FindModule +#define __Pyx_State_AddModule PyState_AddModule +#define __Pyx_State_RemoveModule PyState_RemoveModule +#endif + +/* #### Code section: module_declarations ### */ +/* CythonABIVersion.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API + #if CYTHON_METH_FASTCALL + #define __PYX_FASTCALL_ABI_SUFFIX "_fastcall" + #else + #define __PYX_FASTCALL_ABI_SUFFIX + #endif + #define __PYX_LIMITED_ABI_SUFFIX "limited" __PYX_FASTCALL_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX +#else + #define __PYX_LIMITED_ABI_SUFFIX +#endif +#if __PYX_HAS_PY_AM_SEND == 1 + #define __PYX_AM_SEND_ABI_SUFFIX +#elif __PYX_HAS_PY_AM_SEND == 2 + #define __PYX_AM_SEND_ABI_SUFFIX "amsendbackport" +#else + #define __PYX_AM_SEND_ABI_SUFFIX "noamsend" +#endif +#ifndef __PYX_MONITORING_ABI_SUFFIX + #define __PYX_MONITORING_ABI_SUFFIX +#endif +#if CYTHON_USE_TP_FINALIZE + #define __PYX_TP_FINALIZE_ABI_SUFFIX +#else + #define __PYX_TP_FINALIZE_ABI_SUFFIX "nofinalize" +#endif +#if CYTHON_USE_FREELISTS || !defined(__Pyx_AsyncGen_USED) + #define __PYX_FREELISTS_ABI_SUFFIX +#else + #define __PYX_FREELISTS_ABI_SUFFIX "nofreelists" +#endif +#define CYTHON_ABI __PYX_ABI_VERSION __PYX_LIMITED_ABI_SUFFIX __PYX_MONITORING_ABI_SUFFIX __PYX_TP_FINALIZE_ABI_SUFFIX __PYX_FREELISTS_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX +#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI +#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." 
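+
+/* Editor's note (illustrative, not generated by Cython): CYTHON_ABI above
+ * concatenates adjacent string literals, so every feature flag that affects
+ * binary layout also changes the name of the shared ABI module. For example,
+ * assuming a hypothetical __PYX_ABI_VERSION of "3_1" and a Limited-API,
+ * METH_FASTCALL build with native Py_am_send support and all other suffixes
+ * empty, CYTHON_ABI would expand to "3_1" "limited" "_fastcall", and the
+ * shared helper types would live in a module named
+ * "_cython_3_1limited_fastcall". Two extension modules therefore only share
+ * the internal types registered under __PYX_ABI_MODULE_NAME when all of
+ * these ABI-relevant settings match exactly. */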
+ + +/* Module declarations from "cython" */ + +/* Module declarations from "fontTools.cu2qu.cu2qu" */ +static CYTHON_INLINE double __pyx_f_9fontTools_5cu2qu_5cu2qu_dot(__pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu__complex_div_by_real(__pyx_t_double_complex, double); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_points(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_parameters(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_n_iter(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, PyObject *); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_three(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static CYTHON_INLINE __pyx_t_double_complex __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_control(double, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static CYTHON_INLINE __pyx_t_double_complex __pyx_f_9fontTools_5cu2qu_5cu2qu_calc_intersect(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex); /*proto*/ +static int __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, __pyx_t_double_complex, double); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_quadratic(PyObject *, double); /*proto*/ +static PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_spline(PyObject *, int, double, int); /*proto*/ +/* #### Code section: typeinfo ### */ +/* #### Code section: before_global_var ### */ +#define __Pyx_MODULE_NAME "fontTools.cu2qu.cu2qu" +extern int __pyx_module_is_main_fontTools__cu2qu__cu2qu; +int __pyx_module_is_main_fontTools__cu2qu__cu2qu = 0; + +/* Implementation of "fontTools.cu2qu.cu2qu" */ +/* #### Code section: global_var ### */ +/* #### Code section: string_decls ### */ +/* #### Code section: decls ### */ +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu__split_cubic_into_n_gen(CYTHON_UNUSED PyObject *__pyx_self, __pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3, int __pyx_v_n); /* proto */ +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu_3curve_to_quadratic(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_curve, double __pyx_v_max_err, int __pyx_v_all_quadratic); /* proto */ +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu_5curves_to_quadratic(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_curves, PyObject *__pyx_v_max_errors, int __pyx_v_all_quadratic); /* proto */ +static PyObject *__pyx_tp_new_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +/* #### Code section: late_includes ### */ +/* #### Code section: module_state ### */ +/* SmallCodeConfig */ +#ifndef CYTHON_SMALL_CODE +#if 
defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +typedef struct { + PyObject *__pyx_d; + PyObject *__pyx_b; + PyObject *__pyx_cython_runtime; + PyObject *__pyx_empty_tuple; + PyObject *__pyx_empty_bytes; + PyObject *__pyx_empty_unicode; + PyObject *__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen; + PyTypeObject *__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen; + __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_items; + __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_pop; + __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_values; + PyObject *__pyx_codeobj_tab[3]; + PyObject *__pyx_string_tab[80]; + PyObject *__pyx_number_tab[6]; +/* #### Code section: module_state_contents ### */ +/* IterNextPlain.module_state_decls */ +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 +PyObject *__Pyx_GetBuiltinNext_LimitedAPI_cache; +#endif + + +#if CYTHON_USE_FREELISTS +struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *__pyx_freelist_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen[8]; +int __pyx_freecount_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen; +#endif +/* CommonTypesMetaclass.module_state_decls */ +PyTypeObject *__pyx_CommonTypesMetaclassType; + +/* CachedMethodType.module_state_decls */ +#if CYTHON_COMPILING_IN_LIMITED_API +PyObject *__Pyx_CachedMethodType; +#endif + +/* CythonFunctionShared.module_state_decls */ +PyTypeObject *__pyx_CyFunctionType; + +/* CodeObjectCache.module_state_decls */ +struct __Pyx_CodeObjectCache __pyx_code_cache; + +/* Generator.module_state_decls */ +PyTypeObject *__pyx_GeneratorType; + +/* #### Code section: module_state_end ### */ +} __pyx_mstatetype; + +#if CYTHON_USE_MODULE_STATE +#ifdef __cplusplus +namespace { +extern struct PyModuleDef __pyx_moduledef; +} /* anonymous namespace */ +#else +static struct PyModuleDef __pyx_moduledef; +#endif + +#define __pyx_mstate_global (__Pyx_PyModule_GetState(__Pyx_State_FindModule(&__pyx_moduledef))) + +#define __pyx_m (__Pyx_State_FindModule(&__pyx_moduledef)) +#else +static __pyx_mstatetype __pyx_mstate_global_static = +#ifdef __cplusplus + {}; +#else + {0}; +#endif +static __pyx_mstatetype * const __pyx_mstate_global = &__pyx_mstate_global_static; +#endif +/* #### Code section: constant_name_defines ### */ +#define __pyx_kp_u_ __pyx_string_tab[0] +#define __pyx_kp_u_Lib_fontTools_cu2qu_cu2qu_py __pyx_string_tab[1] +#define __pyx_kp_u_Return_quadratic_Bezier_splines __pyx_string_tab[2] +#define __pyx_kp_u__2 __pyx_string_tab[3] +#define __pyx_kp_u_curves_to_quadratic_line_503 __pyx_string_tab[4] +#define __pyx_kp_u_disable __pyx_string_tab[5] +#define __pyx_kp_u_enable __pyx_string_tab[6] +#define __pyx_kp_u_fontTools_cu2qu_errors __pyx_string_tab[7] +#define __pyx_kp_u_gc __pyx_string_tab[8] +#define __pyx_kp_u_isenabled __pyx_string_tab[9] +#define __pyx_n_u_ApproxNotFoundError __pyx_string_tab[10] +#define __pyx_n_u_COMPILED __pyx_string_tab[11] +#define __pyx_n_u_Cu2QuError __pyx_string_tab[12] +#define __pyx_n_u_Error __pyx_string_tab[13] +#define __pyx_n_u_MAX_N __pyx_string_tab[14] +#define __pyx_n_u_NAN __pyx_string_tab[15] +#define __pyx_n_u_NaN __pyx_string_tab[16] +#define __pyx_n_u_Pyx_PyDict_NextRef __pyx_string_tab[17] +#define __pyx_n_u_a 
__pyx_string_tab[18] +#define __pyx_n_u_a1 __pyx_string_tab[19] +#define __pyx_n_u_all __pyx_string_tab[20] +#define __pyx_n_u_all_quadratic __pyx_string_tab[21] +#define __pyx_n_u_asyncio_coroutines __pyx_string_tab[22] +#define __pyx_n_u_b __pyx_string_tab[23] +#define __pyx_n_u_b1 __pyx_string_tab[24] +#define __pyx_n_u_c __pyx_string_tab[25] +#define __pyx_n_u_c1 __pyx_string_tab[26] +#define __pyx_n_u_cline_in_traceback __pyx_string_tab[27] +#define __pyx_n_u_close __pyx_string_tab[28] +#define __pyx_n_u_curve __pyx_string_tab[29] +#define __pyx_n_u_curve_to_quadratic __pyx_string_tab[30] +#define __pyx_n_u_curves __pyx_string_tab[31] +#define __pyx_n_u_curves_to_quadratic __pyx_string_tab[32] +#define __pyx_n_u_d __pyx_string_tab[33] +#define __pyx_n_u_d1 __pyx_string_tab[34] +#define __pyx_n_u_delta_2 __pyx_string_tab[35] +#define __pyx_n_u_delta_3 __pyx_string_tab[36] +#define __pyx_n_u_dt __pyx_string_tab[37] +#define __pyx_n_u_errors __pyx_string_tab[38] +#define __pyx_n_u_fontTools_cu2qu_cu2qu __pyx_string_tab[39] +#define __pyx_n_u_func __pyx_string_tab[40] +#define __pyx_n_u_i __pyx_string_tab[41] +#define __pyx_n_u_imag __pyx_string_tab[42] +#define __pyx_n_u_is_coroutine __pyx_string_tab[43] +#define __pyx_n_u_isnan __pyx_string_tab[44] +#define __pyx_n_u_items __pyx_string_tab[45] +#define __pyx_n_u_l __pyx_string_tab[46] +#define __pyx_n_u_last_i __pyx_string_tab[47] +#define __pyx_n_u_main __pyx_string_tab[48] +#define __pyx_n_u_math __pyx_string_tab[49] +#define __pyx_n_u_max_err __pyx_string_tab[50] +#define __pyx_n_u_max_errors __pyx_string_tab[51] +#define __pyx_n_u_module __pyx_string_tab[52] +#define __pyx_n_u_n __pyx_string_tab[53] +#define __pyx_n_u_name __pyx_string_tab[54] +#define __pyx_n_u_next __pyx_string_tab[55] +#define __pyx_n_u_p __pyx_string_tab[56] +#define __pyx_n_u_p0 __pyx_string_tab[57] +#define __pyx_n_u_p1 __pyx_string_tab[58] +#define __pyx_n_u_p2 __pyx_string_tab[59] +#define __pyx_n_u_p3 __pyx_string_tab[60] +#define __pyx_n_u_pop __pyx_string_tab[61] +#define __pyx_n_u_qualname __pyx_string_tab[62] +#define __pyx_n_u_real __pyx_string_tab[63] +#define __pyx_n_u_s __pyx_string_tab[64] +#define __pyx_n_u_send __pyx_string_tab[65] +#define __pyx_n_u_set_name __pyx_string_tab[66] +#define __pyx_n_u_setdefault __pyx_string_tab[67] +#define __pyx_n_u_spline __pyx_string_tab[68] +#define __pyx_n_u_splines __pyx_string_tab[69] +#define __pyx_n_u_split_cubic_into_n_gen __pyx_string_tab[70] +#define __pyx_n_u_t1 __pyx_string_tab[71] +#define __pyx_n_u_t1_2 __pyx_string_tab[72] +#define __pyx_n_u_test __pyx_string_tab[73] +#define __pyx_n_u_throw __pyx_string_tab[74] +#define __pyx_n_u_value __pyx_string_tab[75] +#define __pyx_n_u_values __pyx_string_tab[76] +#define __pyx_kp_b_iso88591_AWBc_U_U_3fBa_AWCy_7_2QgQgT_a_Q __pyx_string_tab[77] +#define __pyx_kp_b_iso88591_J_Qawb_4uG4y_3a_3c_1A_avRq_T_AV __pyx_string_tab[78] +#define __pyx_kp_b_iso88591__3 __pyx_string_tab[79] +#define __pyx_int_1 __pyx_number_tab[0] +#define __pyx_int_2 __pyx_number_tab[1] +#define __pyx_int_3 __pyx_number_tab[2] +#define __pyx_int_4 __pyx_number_tab[3] +#define __pyx_int_6 __pyx_number_tab[4] +#define __pyx_int_100 __pyx_number_tab[5] +/* #### Code section: module_state_clear ### */ +#if CYTHON_USE_MODULE_STATE +static CYTHON_SMALL_CODE int __pyx_m_clear(PyObject *m) { + __pyx_mstatetype *clear_module_state = __Pyx_PyModule_GetState(m); + if (!clear_module_state) return 0; + Py_CLEAR(clear_module_state->__pyx_d); + Py_CLEAR(clear_module_state->__pyx_b); + 
Py_CLEAR(clear_module_state->__pyx_cython_runtime); + Py_CLEAR(clear_module_state->__pyx_empty_tuple); + Py_CLEAR(clear_module_state->__pyx_empty_bytes); + Py_CLEAR(clear_module_state->__pyx_empty_unicode); + #if CYTHON_PEP489_MULTI_PHASE_INIT + __Pyx_State_RemoveModule(NULL); + #endif + Py_CLEAR(clear_module_state->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen); + Py_CLEAR(clear_module_state->__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen); + for (int i=0; i<3; ++i) { Py_CLEAR(clear_module_state->__pyx_codeobj_tab[i]); } + for (int i=0; i<80; ++i) { Py_CLEAR(clear_module_state->__pyx_string_tab[i]); } + for (int i=0; i<6; ++i) { Py_CLEAR(clear_module_state->__pyx_number_tab[i]); } +/* #### Code section: module_state_clear_contents ### */ +/* CommonTypesMetaclass.module_state_clear */ +Py_CLEAR(clear_module_state->__pyx_CommonTypesMetaclassType); + +/* CythonFunctionShared.module_state_clear */ +Py_CLEAR(clear_module_state->__pyx_CyFunctionType); + +/* Generator.module_state_clear */ +Py_CLEAR(clear_module_state->__pyx_GeneratorType); + +/* #### Code section: module_state_clear_end ### */ +return 0; +} +#endif +/* #### Code section: module_state_traverse ### */ +#if CYTHON_USE_MODULE_STATE +static CYTHON_SMALL_CODE int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { + __pyx_mstatetype *traverse_module_state = __Pyx_PyModule_GetState(m); + if (!traverse_module_state) return 0; + Py_VISIT(traverse_module_state->__pyx_d); + Py_VISIT(traverse_module_state->__pyx_b); + Py_VISIT(traverse_module_state->__pyx_cython_runtime); + __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_tuple); + __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_bytes); + __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_unicode); + Py_VISIT(traverse_module_state->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen); + Py_VISIT(traverse_module_state->__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen); + for (int i=0; i<3; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_codeobj_tab[i]); } + for (int i=0; i<80; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_string_tab[i]); } + for (int i=0; i<6; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_number_tab[i]); } +/* #### Code section: module_state_traverse_contents ### */ +/* CommonTypesMetaclass.module_state_traverse */ +Py_VISIT(traverse_module_state->__pyx_CommonTypesMetaclassType); + +/* CythonFunctionShared.module_state_traverse */ +Py_VISIT(traverse_module_state->__pyx_CyFunctionType); + +/* Generator.module_state_traverse */ +Py_VISIT(traverse_module_state->__pyx_GeneratorType); + +/* #### Code section: module_state_traverse_end ### */ +return 0; +} +#endif +/* #### Code section: module_code ### */ + +/* "fontTools/cu2qu/cu2qu.py":37 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.double) +*/ + +static CYTHON_INLINE double __pyx_f_9fontTools_5cu2qu_5cu2qu_dot(__pyx_t_double_complex __pyx_v_v1, __pyx_t_double_complex __pyx_v_v2) { + double __pyx_v_result; + double __pyx_r; + double __pyx_t_1; + int __pyx_t_2; + + /* "fontTools/cu2qu/cu2qu.py":51 + * double: Dot product. + * """ + * result = (v1 * v2.conjugate()).real # <<<<<<<<<<<<<< + * # When vectors are perpendicular (i.e. 
dot product is 0), the above expression may + * # yield slightly different results when running in pure Python vs C/Cython, +*/ + __pyx_t_1 = __Pyx_CREAL(__Pyx_c_prod_double(__pyx_v_v1, __Pyx_c_conj_double(__pyx_v_v2))); + __pyx_v_result = __pyx_t_1; + + /* "fontTools/cu2qu/cu2qu.py":58 + * # implementation. Because we are using the result in a denominator and catching + * # ZeroDivisionError (see `calc_intersect`), it's best to normalize the result here. + * if abs(result) < 1e-15: # <<<<<<<<<<<<<< + * result = 0.0 + * return result +*/ + __pyx_t_1 = fabs(__pyx_v_result); + __pyx_t_2 = (__pyx_t_1 < 1e-15); + if (__pyx_t_2) { + + /* "fontTools/cu2qu/cu2qu.py":59 + * # ZeroDivisionError (see `calc_intersect`), it's best to normalize the result here. + * if abs(result) < 1e-15: + * result = 0.0 # <<<<<<<<<<<<<< + * return result + * +*/ + __pyx_v_result = 0.0; + + /* "fontTools/cu2qu/cu2qu.py":58 + * # implementation. Because we are using the result in a denominator and catching + * # ZeroDivisionError (see `calc_intersect`), it's best to normalize the result here. + * if abs(result) < 1e-15: # <<<<<<<<<<<<<< + * result = 0.0 + * return result +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":60 + * if abs(result) < 1e-15: + * result = 0.0 + * return result # <<<<<<<<<<<<<< + * + * +*/ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":37 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.double) +*/ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":63 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.locals(z=cython.complex, den=cython.double) + * @cython.locals(zr=cython.double, zi=cython.double) +*/ + +static PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu__complex_div_by_real(__pyx_t_double_complex __pyx_v_z, double __pyx_v_den) { + double __pyx_v_zr; + double __pyx_v_zi; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + size_t __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_complex_div_by_real", 0); + + /* "fontTools/cu2qu/cu2qu.py":75 + * https://github.com/fonttools/fonttools/issues/3928 + * """ + * zr = z.real # <<<<<<<<<<<<<< + * zi = z.imag + * return complex(zr / den, zi / den) +*/ + __pyx_t_1 = __Pyx_CREAL(__pyx_v_z); + __pyx_v_zr = __pyx_t_1; + + /* "fontTools/cu2qu/cu2qu.py":76 + * """ + * zr = z.real + * zi = z.imag # <<<<<<<<<<<<<< + * return complex(zr / den, zi / den) + * +*/ + __pyx_t_1 = __Pyx_CIMAG(__pyx_v_z); + __pyx_v_zi = __pyx_t_1; + + /* "fontTools/cu2qu/cu2qu.py":77 + * zr = z.real + * zi = z.imag + * return complex(zr / den, zi / den) # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = NULL; + if (unlikely(__pyx_v_den == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 77, __pyx_L1_error) + } + __pyx_t_4 = PyFloat_FromDouble((__pyx_v_zr / __pyx_v_den)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (unlikely(__pyx_v_den == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 77, __pyx_L1_error) + } + __pyx_t_5 = PyFloat_FromDouble((__pyx_v_zi / __pyx_v_den)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = 1; + { + PyObject *__pyx_callargs[3] = {__pyx_t_3, 
__pyx_t_4, __pyx_t_5}; + __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)(&PyComplex_Type), __pyx_callargs+__pyx_t_6, (3-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + } + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":63 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.locals(z=cython.complex, den=cython.double) + * @cython.locals(zr=cython.double, zi=cython.double) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu._complex_div_by_real", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":80 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_points(__pyx_t_double_complex __pyx_v_a, __pyx_t_double_complex __pyx_v_b, __pyx_t_double_complex __pyx_v_c, __pyx_t_double_complex __pyx_v_d) { + __pyx_t_double_complex __pyx_v__1; + __pyx_t_double_complex __pyx_v__2; + __pyx_t_double_complex __pyx_v__3; + __pyx_t_double_complex __pyx_v__4; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __pyx_t_double_complex __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("calc_cubic_points", 0); + + /* "fontTools/cu2qu/cu2qu.py":87 + * ) + * def calc_cubic_points(a, b, c, d): + * _1 = d # <<<<<<<<<<<<<< + * _2 = _complex_div_by_real(c, 3.0) + d + * _3 = _complex_div_by_real(b + c, 3.0) + _2 +*/ + __pyx_v__1 = __pyx_v_d; + + /* "fontTools/cu2qu/cu2qu.py":88 + * def calc_cubic_points(a, b, c, d): + * _1 = d + * _2 = _complex_div_by_real(c, 3.0) + d # <<<<<<<<<<<<<< + * _3 = _complex_div_by_real(b + c, 3.0) + _2 + * _4 = a + d + c + b +*/ + __pyx_t_1 = __pyx_f_9fontTools_5cu2qu_5cu2qu__complex_div_by_real(__pyx_v_c, 3.0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_PyComplex_FromComplex(__pyx_v_d); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_3); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v__2 = __pyx_t_4; + + /* "fontTools/cu2qu/cu2qu.py":89 + * _1 = d + * _2 = _complex_div_by_real(c, 3.0) + d + * _3 = _complex_div_by_real(b + c, 3.0) + _2 # <<<<<<<<<<<<<< + * _4 = a + d + c + b + * return _1, _2, _3, _4 +*/ + __pyx_t_3 = __pyx_f_9fontTools_5cu2qu_5cu2qu__complex_div_by_real(__Pyx_c_sum_double(__pyx_v_b, __pyx_v_c), 3.0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 89, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __pyx_PyComplex_FromComplex(__pyx_v__2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v__3 = __pyx_t_4; + + /* "fontTools/cu2qu/cu2qu.py":90 + * _2 = _complex_div_by_real(c, 3.0) + d + * _3 = _complex_div_by_real(b + c, 3.0) + _2 + * _4 = a + d + c + b # <<<<<<<<<<<<<< + * return _1, _2, _3, _4 + * +*/ + __pyx_v__4 = __Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__pyx_v_a, __pyx_v_d), __pyx_v_c), __pyx_v_b); + + /* "fontTools/cu2qu/cu2qu.py":91 + * _3 = _complex_div_by_real(b + c, 3.0) + _2 + * _4 = a + d + c + b + * return _1, _2, _3, _4 # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v__1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_PyComplex_FromComplex(__pyx_v__2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __pyx_PyComplex_FromComplex(__pyx_v__3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_v__4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyTuple_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 91, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 91, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_3) != (0)) __PYX_ERR(0, 91, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 3, __pyx_t_5) != (0)) __PYX_ERR(0, 91, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":80 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.calc_cubic_points", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":94 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_parameters(__pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3) { + __pyx_t_double_complex __pyx_v_a; + __pyx_t_double_complex __pyx_v_b; + __pyx_t_double_complex __pyx_v_c; + __pyx_t_double_complex __pyx_v_d; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + 
PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("calc_cubic_parameters", 0); + + /* "fontTools/cu2qu/cu2qu.py":101 + * @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) + * def calc_cubic_parameters(p0, p1, p2, p3): + * c = (p1 - p0) * 3.0 # <<<<<<<<<<<<<< + * b = (p2 - p1) * 3.0 - c + * d = p0 +*/ + __pyx_v_c = __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_p1, __pyx_v_p0), __pyx_t_double_complex_from_parts(3.0, 0)); + + /* "fontTools/cu2qu/cu2qu.py":102 + * def calc_cubic_parameters(p0, p1, p2, p3): + * c = (p1 - p0) * 3.0 + * b = (p2 - p1) * 3.0 - c # <<<<<<<<<<<<<< + * d = p0 + * a = p3 - d - c - b +*/ + __pyx_v_b = __Pyx_c_diff_double(__Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_p2, __pyx_v_p1), __pyx_t_double_complex_from_parts(3.0, 0)), __pyx_v_c); + + /* "fontTools/cu2qu/cu2qu.py":103 + * c = (p1 - p0) * 3.0 + * b = (p2 - p1) * 3.0 - c + * d = p0 # <<<<<<<<<<<<<< + * a = p3 - d - c - b + * return a, b, c, d +*/ + __pyx_v_d = __pyx_v_p0; + + /* "fontTools/cu2qu/cu2qu.py":104 + * b = (p2 - p1) * 3.0 - c + * d = p0 + * a = p3 - d - c - b # <<<<<<<<<<<<<< + * return a, b, c, d + * +*/ + __pyx_v_a = __Pyx_c_diff_double(__Pyx_c_diff_double(__Pyx_c_diff_double(__pyx_v_p3, __pyx_v_d), __pyx_v_c), __pyx_v_b); + + /* "fontTools/cu2qu/cu2qu.py":105 + * d = p0 + * a = p3 - d - c - b + * return a, b, c, d # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_a); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_PyComplex_FromComplex(__pyx_v_b); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __pyx_PyComplex_FromComplex(__pyx_v_c); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __pyx_PyComplex_FromComplex(__pyx_v_d); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 105, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 105, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3) != (0)) __PYX_ERR(0, 105, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4) != (0)) __PYX_ERR(0, 105, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":94 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.calc_cubic_parameters", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":108 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + 
* @cython.inline + * @cython.locals( +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_n_iter(__pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3, PyObject *__pyx_v_n) { + PyObject *__pyx_v_a = NULL; + PyObject *__pyx_v_b = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *(*__pyx_t_6)(PyObject *); + __pyx_t_double_complex __pyx_t_7; + __pyx_t_double_complex __pyx_t_8; + __pyx_t_double_complex __pyx_t_9; + __pyx_t_double_complex __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + PyObject *__pyx_t_13 = NULL; + size_t __pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("split_cubic_into_n_iter", 0); + + /* "fontTools/cu2qu/cu2qu.py":130 + * """ + * # Hand-coded special-cases + * if n == 2: # <<<<<<<<<<<<<< + * return iter(split_cubic_into_two(p0, p1, p2, p3)) + * if n == 3: +*/ + __pyx_t_1 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_n, __pyx_mstate_global->__pyx_int_2, 2, 0)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 130, __pyx_L1_error) + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":131 + * # Hand-coded special-cases + * if n == 2: + * return iter(split_cubic_into_two(p0, p1, p2, p3)) # <<<<<<<<<<<<<< + * if n == 3: + * return iter(split_cubic_into_three(p0, p1, p2, p3)) +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_v_p0, __pyx_v_p1, __pyx_v_p2, __pyx_v_p3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":130 + * """ + * # Hand-coded special-cases + * if n == 2: # <<<<<<<<<<<<<< + * return iter(split_cubic_into_two(p0, p1, p2, p3)) + * if n == 3: +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":132 + * if n == 2: + * return iter(split_cubic_into_two(p0, p1, p2, p3)) + * if n == 3: # <<<<<<<<<<<<<< + * return iter(split_cubic_into_three(p0, p1, p2, p3)) + * if n == 4: +*/ + __pyx_t_1 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_n, __pyx_mstate_global->__pyx_int_3, 3, 0)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":133 + * return iter(split_cubic_into_two(p0, p1, p2, p3)) + * if n == 3: + * return iter(split_cubic_into_three(p0, p1, p2, p3)) # <<<<<<<<<<<<<< + * if n == 4: + * a, b = split_cubic_into_two(p0, p1, p2, p3) +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_three(__pyx_v_p0, __pyx_v_p1, __pyx_v_p2, __pyx_v_p3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":132 + * if n == 2: + * return iter(split_cubic_into_two(p0, p1, p2, p3)) + * if n == 3: # <<<<<<<<<<<<<< + * return iter(split_cubic_into_three(p0, p1, p2, p3)) + * if n == 4: +*/ + } + + /* 
"fontTools/cu2qu/cu2qu.py":134 + * if n == 3: + * return iter(split_cubic_into_three(p0, p1, p2, p3)) + * if n == 4: # <<<<<<<<<<<<<< + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( +*/ + __pyx_t_1 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_n, __pyx_mstate_global->__pyx_int_4, 4, 0)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 134, __pyx_L1_error) + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":135 + * return iter(split_cubic_into_three(p0, p1, p2, p3)) + * if n == 4: + * a, b = split_cubic_into_two(p0, p1, p2, p3) # <<<<<<<<<<<<<< + * return iter( + * split_cubic_into_two(a[0], a[1], a[2], a[3]) +*/ + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_v_p0, __pyx_v_p1, __pyx_v_p2, __pyx_v_p3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 135, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __Pyx_INCREF(__pyx_t_3); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_4); + } else { + __pyx_t_3 = __Pyx_PyList_GetItemRefFast(sequence, 0, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyList_GetItemRefFast(sequence, 1, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_4); + } + #else + __pyx_t_3 = __Pyx_PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_5 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_6 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); + index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L6_unpacking_failed; + __Pyx_GOTREF(__pyx_t_3); + index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L6_unpacking_failed; + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < (0)) __PYX_ERR(0, 135, __pyx_L1_error) + __pyx_t_6 = NULL; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L7_unpacking_done; + __pyx_L6_unpacking_failed:; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 135, __pyx_L1_error) + __pyx_L7_unpacking_done:; + } + __pyx_v_a = __pyx_t_3; + __pyx_t_3 = 0; + __pyx_v_b = __pyx_t_4; + __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":136 + * if n == 4: + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( # <<<<<<<<<<<<<< + * split_cubic_into_two(a[0], a[1], a[2], a[3]) + * + split_cubic_into_two(b[0], b[1], b[2], b[3]) +*/ + __Pyx_XDECREF(__pyx_r); + + /* "fontTools/cu2qu/cu2qu.py":137 + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( + * split_cubic_into_two(a[0], a[1], a[2], a[3]) # <<<<<<<<<<<<<< + * + split_cubic_into_two(b[0], b[1], b[2], b[3]) + * ) +*/ + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_a, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_a, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_8 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_a, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_a, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_10 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_t_7, __pyx_t_8, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "fontTools/cu2qu/cu2qu.py":138 + * return iter( + * split_cubic_into_two(a[0], a[1], a[2], a[3]) + * + split_cubic_into_two(b[0], b[1], b[2], b[3]) # <<<<<<<<<<<<<< + * ) + * if n == 6: +*/ + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_b, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_10 = 
__Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_b, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_b, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_8 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_b, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_t_10, __pyx_t_9, __pyx_t_8, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 138, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":136 + * if n == 4: + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( # <<<<<<<<<<<<<< + * split_cubic_into_two(a[0], a[1], a[2], a[3]) + * + split_cubic_into_two(b[0], b[1], b[2], b[3]) +*/ + __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":134 + * if n == 3: + * return iter(split_cubic_into_three(p0, p1, p2, p3)) + * if n == 4: # <<<<<<<<<<<<<< + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":140 + * + split_cubic_into_two(b[0], b[1], b[2], b[3]) + * ) + * if n == 6: # <<<<<<<<<<<<<< + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( +*/ + __pyx_t_1 = (__Pyx_PyLong_BoolEqObjC(__pyx_v_n, __pyx_mstate_global->__pyx_int_6, 6, 0)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 140, __pyx_L1_error) + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":141 + * ) + * if n == 6: + * a, b = split_cubic_into_two(p0, p1, p2, p3) # <<<<<<<<<<<<<< + * return iter( + * split_cubic_into_three(a[0], a[1], a[2], a[3]) +*/ + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_v_p0, __pyx_v_p1, __pyx_v_p2, __pyx_v_p3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { + PyObject* sequence = __pyx_t_4; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 
141, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __Pyx_INCREF(__pyx_t_3); + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_2); + } else { + __pyx_t_3 = __Pyx_PyList_GetItemRefFast(sequence, 0, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_PyList_GetItemRefFast(sequence, 1, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_2); + } + #else + __pyx_t_3 = __Pyx_PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + #endif + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_5 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); + index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L9_unpacking_failed; + __Pyx_GOTREF(__pyx_t_3); + index = 1; __pyx_t_2 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_2)) goto __pyx_L9_unpacking_failed; + __Pyx_GOTREF(__pyx_t_2); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < (0)) __PYX_ERR(0, 141, __pyx_L1_error) + __pyx_t_6 = NULL; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L10_unpacking_done; + __pyx_L9_unpacking_failed:; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 141, __pyx_L1_error) + __pyx_L10_unpacking_done:; + } + __pyx_v_a = __pyx_t_3; + __pyx_t_3 = 0; + __pyx_v_b = __pyx_t_2; + __pyx_t_2 = 0; + + /* "fontTools/cu2qu/cu2qu.py":142 + * if n == 6: + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( # <<<<<<<<<<<<<< + * split_cubic_into_three(a[0], a[1], a[2], a[3]) + * + split_cubic_into_three(b[0], b[1], b[2], b[3]) +*/ + __Pyx_XDECREF(__pyx_r); + + /* "fontTools/cu2qu/cu2qu.py":143 + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( + * split_cubic_into_three(a[0], a[1], a[2], a[3]) # <<<<<<<<<<<<<< + * + split_cubic_into_three(b[0], b[1], b[2], b[3]) + * ) +*/ + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_a, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_a, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_8 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_a, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_a, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_10 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_three(__pyx_t_7, __pyx_t_8, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + + /* "fontTools/cu2qu/cu2qu.py":144 + * return iter( + * split_cubic_into_three(a[0], a[1], a[2], a[3]) + * + split_cubic_into_three(b[0], b[1], b[2], b[3]) # <<<<<<<<<<<<<< + * ) + * +*/ + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_b, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_10 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_b, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_b, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_8 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_b, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_three(__pyx_t_10, __pyx_t_9, __pyx_t_8, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Add(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "fontTools/cu2qu/cu2qu.py":142 + * if n == 6: + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( # <<<<<<<<<<<<<< + * split_cubic_into_three(a[0], a[1], a[2], a[3]) + * + split_cubic_into_three(b[0], b[1], b[2], b[3]) +*/ + __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":140 + * 
+ split_cubic_into_two(b[0], b[1], b[2], b[3]) + * ) + * if n == 6: # <<<<<<<<<<<<<< + * a, b = split_cubic_into_two(p0, p1, p2, p3) + * return iter( +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":147 + * ) + * + * return _split_cubic_into_n_gen(p0, p1, p2, p3, n) # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = NULL; + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_split_cubic_into_n_gen); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_v_p0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_11 = __pyx_PyComplex_FromComplex(__pyx_v_p1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __pyx_t_12 = __pyx_PyComplex_FromComplex(__pyx_v_p2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __pyx_t_13 = __pyx_PyComplex_FromComplex(__pyx_v_p3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __pyx_t_14 = 1; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); + assert(__pyx_t_3); + PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx__function); + __Pyx_DECREF_SET(__pyx_t_4, __pyx__function); + __pyx_t_14 = 0; + } + #endif + { + PyObject *__pyx_callargs[6] = {__pyx_t_3, __pyx_t_5, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_v_n}; + __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_4, __pyx_callargs+__pyx_t_14, (6-__pyx_t_14) | (__pyx_t_14*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + } + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":108 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_XDECREF(__pyx_t_12); + __Pyx_XDECREF(__pyx_t_13); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.split_cubic_into_n_iter", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_a); + __Pyx_XDECREF(__pyx_v_b); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static PyObject *__pyx_gb_9fontTools_5cu2qu_5cu2qu_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ + +/* "fontTools/cu2qu/cu2qu.py":150 + * + * + * @cython.locals( # <<<<<<<<<<<<<< + * p0=cython.complex, + * p1=cython.complex, +*/ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_1_split_cubic_into_n_gen(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_5cu2qu_5cu2qu__split_cubic_into_n_gen, "_split_cubic_into_n_gen(double complex p0, double complex p1, double complex p2, 
double complex p3, int n)"); +static PyMethodDef __pyx_mdef_9fontTools_5cu2qu_5cu2qu_1_split_cubic_into_n_gen = {"_split_cubic_into_n_gen", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_5cu2qu_5cu2qu_1_split_cubic_into_n_gen, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_5cu2qu_5cu2qu__split_cubic_into_n_gen}; +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_1_split_cubic_into_n_gen(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + __pyx_t_double_complex __pyx_v_p0; + __pyx_t_double_complex __pyx_v_p1; + __pyx_t_double_complex __pyx_v_p2; + __pyx_t_double_complex __pyx_v_p3; + int __pyx_v_n; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[5] = {0,0,0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("_split_cubic_into_n_gen (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_SIZE + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p0,&__pyx_mstate_global->__pyx_n_u_p1,&__pyx_mstate_global->__pyx_n_u_p2,&__pyx_mstate_global->__pyx_n_u_p3,&__pyx_mstate_global->__pyx_n_u_n,0}; + const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; + if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 150, __pyx_L3_error) + if (__pyx_kwds_len > 0) { + switch (__pyx_nargs) { + case 5: + values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 150, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 4: + values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 150, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 3: + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 150, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 2: + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 150, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 1: + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 150, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "_split_cubic_into_n_gen", 0) < (0)) __PYX_ERR(0, 150, __pyx_L3_error) + for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { + if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("_split_cubic_into_n_gen", 1, 5, 5, i); __PYX_ERR(0, 150, __pyx_L3_error) } + } + } else if (unlikely(__pyx_nargs != 5)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 150, __pyx_L3_error) + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 150, __pyx_L3_error) + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 150, __pyx_L3_error) + values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 150, __pyx_L3_error) + values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 150, __pyx_L3_error) + } + __pyx_v_p0 = __Pyx_PyComplex_As___pyx_t_double_complex(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + __pyx_v_p1 = __Pyx_PyComplex_As___pyx_t_double_complex(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + __pyx_v_p2 = __Pyx_PyComplex_As___pyx_t_double_complex(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + __pyx_v_p3 = __Pyx_PyComplex_As___pyx_t_double_complex(values[3]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + __pyx_v_n = __Pyx_PyLong_As_int(values[4]); if (unlikely((__pyx_v_n == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("_split_cubic_into_n_gen", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 150, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu._split_cubic_into_n_gen", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_5cu2qu_5cu2qu__split_cubic_into_n_gen(__pyx_self, __pyx_v_p0, __pyx_v_p1, __pyx_v_p2, __pyx_v_p3, __pyx_v_n); + + /* function exit code */ + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu__split_cubic_into_n_gen(CYTHON_UNUSED PyObject *__pyx_self, __pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3, int __pyx_v_n) { + struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *__pyx_cur_scope; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_split_cubic_into_n_gen", 0); + __pyx_cur_scope = (struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *)__pyx_tp_new_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen(__pyx_mstate_global->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen, __pyx_mstate_global->__pyx_empty_tuple, NULL); + if (unlikely(!__pyx_cur_scope)) { + __pyx_cur_scope = ((struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *)Py_None); + __Pyx_INCREF(Py_None); + __PYX_ERR(0, 150, __pyx_L1_error) + } else { + __Pyx_GOTREF((PyObject *)__pyx_cur_scope); + } + __pyx_cur_scope->__pyx_v_p0 = __pyx_v_p0; + __pyx_cur_scope->__pyx_v_p1 = __pyx_v_p1; + __pyx_cur_scope->__pyx_v_p2 = __pyx_v_p2; + __pyx_cur_scope->__pyx_v_p3 = __pyx_v_p3; + 
__pyx_cur_scope->__pyx_v_n = __pyx_v_n; + { + __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_9fontTools_5cu2qu_5cu2qu_2generator, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[0]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_split_cubic_into_n_gen, __pyx_mstate_global->__pyx_n_u_split_cubic_into_n_gen, __pyx_mstate_global->__pyx_n_u_fontTools_cu2qu_cu2qu); if (unlikely(!gen)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_DECREF(__pyx_cur_scope); + __Pyx_RefNannyFinishContext(); + return (PyObject *) gen; + } + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu._split_cubic_into_n_gen", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_DECREF((PyObject *)__pyx_cur_scope); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_gb_9fontTools_5cu2qu_5cu2qu_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ +{ + struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *__pyx_cur_scope = ((struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *)__pyx_generator->closure); + PyObject *__pyx_r = NULL; + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *(*__pyx_t_7)(PyObject *); + __pyx_t_double_complex __pyx_t_8; + __pyx_t_double_complex __pyx_t_9; + __pyx_t_double_complex __pyx_t_10; + __pyx_t_double_complex __pyx_t_11; + int __pyx_t_12; + int __pyx_t_13; + int __pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("_split_cubic_into_n_gen", 0); + switch (__pyx_generator->resume_label) { + case 0: goto __pyx_L3_first_run; + case 1: goto __pyx_L8_resume_from_yield; + default: /* CPython raises the right error here */ + __Pyx_RefNannyFinishContext(); + return NULL; + } + __pyx_L3_first_run:; + if (unlikely(__pyx_sent_value != Py_None)) { + if (unlikely(__pyx_sent_value)) PyErr_SetString(PyExc_TypeError, "can't send non-None value to a just-started generator"); + __PYX_ERR(0, 150, __pyx_L1_error) + } + + /* "fontTools/cu2qu/cu2qu.py":165 + * ) + * def _split_cubic_into_n_gen(p0, p1, p2, p3, n): + * a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) # <<<<<<<<<<<<<< + * dt = 1 / n + * delta_2 = dt * dt +*/ + __pyx_t_1 = __pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_parameters(__pyx_cur_scope->__pyx_v_p0, __pyx_cur_scope->__pyx_v_p1, __pyx_cur_scope->__pyx_v_p2, __pyx_cur_scope->__pyx_v_p3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { + PyObject* sequence = __pyx_t_1; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 4)) { + if (size > 4) __Pyx_RaiseTooManyValuesError(4); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 165, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); + __Pyx_INCREF(__pyx_t_2); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 2); + __Pyx_INCREF(__pyx_t_4); + __pyx_t_5 = 
PyTuple_GET_ITEM(sequence, 3); + __Pyx_INCREF(__pyx_t_5); + } else { + __pyx_t_2 = __Pyx_PyList_GetItemRefFast(sequence, 0, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyList_GetItemRefFast(sequence, 1, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyList_GetItemRefFast(sequence, 2, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyList_GetItemRefFast(sequence, 3, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_5); + } + #else + { + Py_ssize_t i; + PyObject** temps[4] = {&__pyx_t_2,&__pyx_t_3,&__pyx_t_4,&__pyx_t_5}; + for (i=0; i < 4; i++) { + PyObject* item = __Pyx_PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(item); + *(temps[i]) = item; + } + } + #endif + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else { + Py_ssize_t index = -1; + PyObject** temps[4] = {&__pyx_t_2,&__pyx_t_3,&__pyx_t_4,&__pyx_t_5}; + __pyx_t_6 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); + for (index=0; index < 4; index++) { + PyObject* item = __pyx_t_7(__pyx_t_6); if (unlikely(!item)) goto __pyx_L4_unpacking_failed; + __Pyx_GOTREF(item); + *(temps[index]) = item; + } + if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 4) < (0)) __PYX_ERR(0, 165, __pyx_L1_error) + __pyx_t_7 = NULL; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L5_unpacking_done; + __pyx_L4_unpacking_failed:; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_7 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 165, __pyx_L1_error) + __pyx_L5_unpacking_done:; + } + __pyx_t_8 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_3); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_10 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_11 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_5); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_cur_scope->__pyx_v_a = __pyx_t_8; + __pyx_cur_scope->__pyx_v_b = __pyx_t_9; + __pyx_cur_scope->__pyx_v_c = __pyx_t_10; + __pyx_cur_scope->__pyx_v_d = __pyx_t_11; + + /* "fontTools/cu2qu/cu2qu.py":166 + * def _split_cubic_into_n_gen(p0, p1, p2, p3, n): + * a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) + * dt = 1 / n # <<<<<<<<<<<<<< + * delta_2 = dt * dt + * delta_3 = dt * delta_2 +*/ + if (unlikely(__pyx_cur_scope->__pyx_v_n == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 166, __pyx_L1_error) + } + __pyx_cur_scope->__pyx_v_dt = (1.0 / ((double)__pyx_cur_scope->__pyx_v_n)); + + /* "fontTools/cu2qu/cu2qu.py":167 + * a, b, c, d = 
calc_cubic_parameters(p0, p1, p2, p3) + * dt = 1 / n + * delta_2 = dt * dt # <<<<<<<<<<<<<< + * delta_3 = dt * delta_2 + * for i in range(n): +*/ + __pyx_cur_scope->__pyx_v_delta_2 = (__pyx_cur_scope->__pyx_v_dt * __pyx_cur_scope->__pyx_v_dt); + + /* "fontTools/cu2qu/cu2qu.py":168 + * dt = 1 / n + * delta_2 = dt * dt + * delta_3 = dt * delta_2 # <<<<<<<<<<<<<< + * for i in range(n): + * t1 = i * dt +*/ + __pyx_cur_scope->__pyx_v_delta_3 = (__pyx_cur_scope->__pyx_v_dt * __pyx_cur_scope->__pyx_v_delta_2); + + /* "fontTools/cu2qu/cu2qu.py":169 + * delta_2 = dt * dt + * delta_3 = dt * delta_2 + * for i in range(n): # <<<<<<<<<<<<<< + * t1 = i * dt + * t1_2 = t1 * t1 +*/ + __pyx_t_12 = __pyx_cur_scope->__pyx_v_n; + __pyx_t_13 = __pyx_t_12; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_cur_scope->__pyx_v_i = __pyx_t_14; + + /* "fontTools/cu2qu/cu2qu.py":170 + * delta_3 = dt * delta_2 + * for i in range(n): + * t1 = i * dt # <<<<<<<<<<<<<< + * t1_2 = t1 * t1 + * # calc new a, b, c and d +*/ + __pyx_cur_scope->__pyx_v_t1 = (__pyx_cur_scope->__pyx_v_i * __pyx_cur_scope->__pyx_v_dt); + + /* "fontTools/cu2qu/cu2qu.py":171 + * for i in range(n): + * t1 = i * dt + * t1_2 = t1 * t1 # <<<<<<<<<<<<<< + * # calc new a, b, c and d + * a1 = a * delta_3 +*/ + __pyx_cur_scope->__pyx_v_t1_2 = (__pyx_cur_scope->__pyx_v_t1 * __pyx_cur_scope->__pyx_v_t1); + + /* "fontTools/cu2qu/cu2qu.py":173 + * t1_2 = t1 * t1 + * # calc new a, b, c and d + * a1 = a * delta_3 # <<<<<<<<<<<<<< + * b1 = (3 * a * t1 + b) * delta_2 + * c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt +*/ + __pyx_cur_scope->__pyx_v_a1 = __Pyx_c_prod_double(__pyx_cur_scope->__pyx_v_a, __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_delta_3, 0)); + + /* "fontTools/cu2qu/cu2qu.py":174 + * # calc new a, b, c and d + * a1 = a * delta_3 + * b1 = (3 * a * t1 + b) * delta_2 # <<<<<<<<<<<<<< + * c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt + * d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d +*/ + __pyx_cur_scope->__pyx_v_b1 = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_prod_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __pyx_cur_scope->__pyx_v_a), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1, 0)), __pyx_cur_scope->__pyx_v_b), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_delta_2, 0)); + + /* "fontTools/cu2qu/cu2qu.py":175 + * a1 = a * delta_3 + * b1 = (3 * a * t1 + b) * delta_2 + * c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt # <<<<<<<<<<<<<< + * d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d + * yield calc_cubic_points(a1, b1, c1, d1) +*/ + __pyx_cur_scope->__pyx_v_c1 = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_prod_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(2, 0), __pyx_cur_scope->__pyx_v_b), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1, 0)), __pyx_cur_scope->__pyx_v_c), __Pyx_c_prod_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __pyx_cur_scope->__pyx_v_a), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1_2, 0))), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_dt, 0)); + + /* "fontTools/cu2qu/cu2qu.py":176 + * b1 = (3 * a * t1 + b) * delta_2 + * c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt + * d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d # <<<<<<<<<<<<<< + * yield calc_cubic_points(a1, b1, c1, d1) + * +*/ + __pyx_cur_scope->__pyx_v_d1 = __Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_prod_double(__Pyx_c_prod_double(__pyx_cur_scope->__pyx_v_a, 
__pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1, 0)), __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1_2, 0)), __Pyx_c_prod_double(__pyx_cur_scope->__pyx_v_b, __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1_2, 0))), __Pyx_c_prod_double(__pyx_cur_scope->__pyx_v_c, __pyx_t_double_complex_from_parts(__pyx_cur_scope->__pyx_v_t1, 0))), __pyx_cur_scope->__pyx_v_d); + + /* "fontTools/cu2qu/cu2qu.py":177 + * c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt + * d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d + * yield calc_cubic_points(a1, b1, c1, d1) # <<<<<<<<<<<<<< + * + * +*/ + __pyx_t_1 = __pyx_f_9fontTools_5cu2qu_5cu2qu_calc_cubic_points(__pyx_cur_scope->__pyx_v_a1, __pyx_cur_scope->__pyx_v_b1, __pyx_cur_scope->__pyx_v_c1, __pyx_cur_scope->__pyx_v_d1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + __pyx_cur_scope->__pyx_t_0 = __pyx_t_12; + __pyx_cur_scope->__pyx_t_1 = __pyx_t_13; + __pyx_cur_scope->__pyx_t_2 = __pyx_t_14; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + __Pyx_Coroutine_ResetAndClearException(__pyx_generator); + /* return from generator, yielding value */ + __pyx_generator->resume_label = 1; + return __pyx_r; + __pyx_L8_resume_from_yield:; + __pyx_t_12 = __pyx_cur_scope->__pyx_t_0; + __pyx_t_13 = __pyx_cur_scope->__pyx_t_1; + __pyx_t_14 = __pyx_cur_scope->__pyx_t_2; + if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 177, __pyx_L1_error) + } + CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); + + /* "fontTools/cu2qu/cu2qu.py":150 + * + * + * @cython.locals( # <<<<<<<<<<<<<< + * p0=cython.complex, + * p1=cython.complex, +*/ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + if (__Pyx_PyErr_Occurred()) { + __Pyx_Generator_Replace_StopIteration(0); + __Pyx_AddTraceback("_split_cubic_into_n_gen", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + #if !CYTHON_USE_EXC_INFO_STACK + __Pyx_Coroutine_ResetAndClearException(__pyx_generator); + #endif + __pyx_generator->resume_label = -1; + __Pyx_Coroutine_clear((PyObject*)__pyx_generator); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":180 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_two(__pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3) { + __pyx_t_double_complex __pyx_v_mid; + __pyx_t_double_complex __pyx_v_deriv3; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __pyx_t_double_complex __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("split_cubic_into_two", 0); + + /* "fontTools/cu2qu/cu2qu.py":201 + * values). 
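+ *
+ *     [Editor's note, not part of the original docstring: the 0.125
+ *      constants are the midpoint de Casteljau split. For a cubic
+ *      Bezier B(t) with control points p0..p3,
+ *
+ *          mid    = B(1/2)    = (p0 + 3*(p1 + p2) + p3) / 8
+ *          deriv3 = B'(1/2)/6 = (p3 + p2 - p1 - p0) / 8
+ *
+ *      so the inner control points mid - deriv3 and mid + deriv3 equal
+ *      the usual (p0 + 2*p1 + p2)/4 and (p1 + 2*p2 + p3)/4.]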
+ * """ + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 # <<<<<<<<<<<<<< + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return ( +*/ + __pyx_v_mid = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__pyx_v_p0, __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __Pyx_c_sum_double(__pyx_v_p1, __pyx_v_p2))), __pyx_v_p3), __pyx_t_double_complex_from_parts(0.125, 0)); + + /* "fontTools/cu2qu/cu2qu.py":202 + * """ + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 # <<<<<<<<<<<<<< + * return ( + * (p0, (p0 + p1) * 0.5, mid - deriv3, mid), +*/ + __pyx_v_deriv3 = __Pyx_c_prod_double(__Pyx_c_diff_double(__Pyx_c_diff_double(__Pyx_c_sum_double(__pyx_v_p3, __pyx_v_p2), __pyx_v_p1), __pyx_v_p0), __pyx_t_double_complex_from_parts(0.125, 0)); + + /* "fontTools/cu2qu/cu2qu.py":203 + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return ( # <<<<<<<<<<<<<< + * (p0, (p0 + p1) * 0.5, mid - deriv3, mid), + * (mid, mid + deriv3, (p2 + p3) * 0.5, p3), +*/ + __Pyx_XDECREF(__pyx_r); + + /* "fontTools/cu2qu/cu2qu.py":204 + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return ( + * (p0, (p0 + p1) * 0.5, mid - deriv3, mid), # <<<<<<<<<<<<<< + * (mid, mid + deriv3, (p2 + p3) * 0.5, p3), + * ) +*/ + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_p0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_c_prod_double(__Pyx_c_sum_double(__pyx_v_p0, __pyx_v_p1), __pyx_t_double_complex_from_parts(0.5, 0)); + __pyx_t_3 = __pyx_PyComplex_FromComplex(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_c_diff_double(__pyx_v_mid, __pyx_v_deriv3); + __pyx_t_4 = __pyx_PyComplex_FromComplex(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_v_mid); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyTuple_New(4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_4) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 3, __pyx_t_5) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_5 = 0; + + /* "fontTools/cu2qu/cu2qu.py":205 + * return ( + * (p0, (p0 + p1) * 0.5, mid - deriv3, mid), + * (mid, mid + deriv3, (p2 + p3) * 0.5, p3), # <<<<<<<<<<<<<< + * ) + * +*/ + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_v_mid); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 205, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_2 = __Pyx_c_sum_double(__pyx_v_mid, __pyx_v_deriv3); + __pyx_t_4 = __pyx_PyComplex_FromComplex(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 205, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_c_prod_double(__Pyx_c_sum_double(__pyx_v_p2, __pyx_v_p3), __pyx_t_double_complex_from_parts(0.5, 0)); + __pyx_t_3 = __pyx_PyComplex_FromComplex(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 205, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_p3); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 205, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_7 = PyTuple_New(4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 205, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 205, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 205, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_3) != (0)) __PYX_ERR(0, 205, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_1) != (0)) __PYX_ERR(0, 205, __pyx_L1_error); + __pyx_t_5 = 0; + __pyx_t_4 = 0; + __pyx_t_3 = 0; + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":204 + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return ( + * (p0, (p0 + p1) * 0.5, mid - deriv3, mid), # <<<<<<<<<<<<<< + * (mid, mid + deriv3, (p2 + p3) * 0.5, p3), + * ) +*/ + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 204, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_7) != (0)) __PYX_ERR(0, 204, __pyx_L1_error); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":180 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.split_cubic_into_two", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":209 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_three(__pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3) { + __pyx_t_double_complex __pyx_v_mid1; + __pyx_t_double_complex __pyx_v_deriv1; + __pyx_t_double_complex __pyx_v_mid2; + __pyx_t_double_complex __pyx_v_deriv2; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __pyx_t_double_complex __pyx_t_2; + __pyx_t_double_complex __pyx_t_3; + __pyx_t_double_complex __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("split_cubic_into_three", 0); + + /* "fontTools/cu2qu/cu2qu.py":238 + * values). 
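+ *
+ *     [Editor's note, not part of the original docstring: the 1/27
+ *      factors come from evaluating the Bezier at the third points,
+ *
+ *          mid1 = B(1/3) = (8*p0 + 12*p1 + 6*p2 + p3) / 27
+ *          mid2 = B(2/3) = (p0 + 6*p1 + 12*p2 + 8*p3) / 27
+ *
+ *      while deriv1 = B'(1/3)/9 and deriv2 = B'(2/3)/9 are the tangents
+ *      scaled for segments of width 1/3, so each piece again gets inner
+ *      control points of the form mid - deriv and mid + deriv.]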
+ * """ + * mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27) # <<<<<<<<<<<<<< + * deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) + * mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) +*/ + __pyx_v_mid1 = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(8, 0), __pyx_v_p0), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(12, 0), __pyx_v_p1)), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(6, 0), __pyx_v_p2)), __pyx_v_p3), __pyx_t_double_complex_from_parts((1.0 / 27.0), 0)); + + /* "fontTools/cu2qu/cu2qu.py":239 + * """ + * mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27) + * deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) # <<<<<<<<<<<<<< + * mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) +*/ + __pyx_v_deriv1 = __Pyx_c_prod_double(__Pyx_c_diff_double(__Pyx_c_sum_double(__pyx_v_p3, __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_p2)), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(4, 0), __pyx_v_p0)), __pyx_t_double_complex_from_parts((1.0 / 27.0), 0)); + + /* "fontTools/cu2qu/cu2qu.py":240 + * mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27) + * deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) + * mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) # <<<<<<<<<<<<<< + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) + * return ( +*/ + __pyx_v_mid2 = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__pyx_v_p0, __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(6, 0), __pyx_v_p1)), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(12, 0), __pyx_v_p2)), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(8, 0), __pyx_v_p3)), __pyx_t_double_complex_from_parts((1.0 / 27.0), 0)); + + /* "fontTools/cu2qu/cu2qu.py":241 + * deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) + * mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) # <<<<<<<<<<<<<< + * return ( + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), +*/ + __pyx_v_deriv2 = __Pyx_c_prod_double(__Pyx_c_diff_double(__Pyx_c_diff_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(4, 0), __pyx_v_p3), __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __pyx_v_p1)), __pyx_v_p0), __pyx_t_double_complex_from_parts((1.0 / 27.0), 0)); + + /* "fontTools/cu2qu/cu2qu.py":242 + * mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) + * return ( # <<<<<<<<<<<<<< + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), + * (mid1, mid1 + deriv1, mid2 - deriv2, mid2), +*/ + __Pyx_XDECREF(__pyx_r); + + /* "fontTools/cu2qu/cu2qu.py":243 + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) + * return ( + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), # <<<<<<<<<<<<<< + * (mid1, mid1 + deriv1, mid2 - deriv2, mid2), + * (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), +*/ + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_p0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_c_sum_double(__Pyx_c_prod_double(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_p0), __pyx_v_p1); + __pyx_t_3 = __pyx_t_double_complex_from_parts(3.0, 0); + if (unlikely(__Pyx_c_is_zero_double(__pyx_t_3))) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 243, __pyx_L1_error) + } + __pyx_t_4 = __Pyx_c_quot_double(__pyx_t_2, __pyx_t_3); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_t_4); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_c_diff_double(__pyx_v_mid1, __pyx_v_deriv1); + __pyx_t_6 = __pyx_PyComplex_FromComplex(__pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __pyx_PyComplex_FromComplex(__pyx_v_mid1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = PyTuple_New(4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 3, __pyx_t_7) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_7 = 0; + + /* "fontTools/cu2qu/cu2qu.py":244 + * return ( + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), + * (mid1, mid1 + deriv1, mid2 - deriv2, mid2), # <<<<<<<<<<<<<< + * (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), + * ) +*/ + __pyx_t_7 = __pyx_PyComplex_FromComplex(__pyx_v_mid1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 244, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_4 = __Pyx_c_sum_double(__pyx_v_mid1, __pyx_v_deriv1); + __pyx_t_6 = __pyx_PyComplex_FromComplex(__pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 244, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_4 = __Pyx_c_diff_double(__pyx_v_mid2, __pyx_v_deriv2); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 244, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_mid2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 244, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_9 = PyTuple_New(4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 244, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 244, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 244, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 244, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 3, __pyx_t_1) != (0)) __PYX_ERR(0, 244, __pyx_L1_error); + __pyx_t_7 = 0; + __pyx_t_6 = 0; + __pyx_t_5 = 0; + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":245 + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), + * (mid1, mid1 + deriv1, mid2 - deriv2, mid2), + * (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), # <<<<<<<<<<<<<< + * ) + * +*/ + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_mid2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_c_sum_double(__pyx_v_mid2, __pyx_v_deriv2); + __pyx_t_5 = __pyx_PyComplex_FromComplex(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_c_sum_double(__pyx_v_p2, __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(2, 0), __pyx_v_p3)); + __pyx_t_3 = __pyx_t_double_complex_from_parts(3.0, 0); + if (unlikely(__Pyx_c_is_zero_double(__pyx_t_3))) { + PyErr_SetString(PyExc_ZeroDivisionError, 
"float division"); + __PYX_ERR(0, 245, __pyx_L1_error) + } + __pyx_t_2 = __Pyx_c_quot_double(__pyx_t_4, __pyx_t_3); + __pyx_t_6 = __pyx_PyComplex_FromComplex(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __pyx_PyComplex_FromComplex(__pyx_v_p3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_10 = PyTuple_New(4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 245, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 245, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 2, __pyx_t_6) != (0)) __PYX_ERR(0, 245, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 3, __pyx_t_7) != (0)) __PYX_ERR(0, 245, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_7 = 0; + + /* "fontTools/cu2qu/cu2qu.py":243 + * deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) + * return ( + * (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), # <<<<<<<<<<<<<< + * (mid1, mid1 + deriv1, mid2 - deriv2, mid2), + * (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), +*/ + __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 243, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_8); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_9); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_10); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_10) != (0)) __PYX_ERR(0, 243, __pyx_L1_error); + __pyx_t_8 = 0; + __pyx_t_9 = 0; + __pyx_t_10 = 0; + __pyx_r = __pyx_t_7; + __pyx_t_7 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":209 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals( +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.split_cubic_into_three", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":249 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.complex) +*/ + +static CYTHON_INLINE __pyx_t_double_complex __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_control(double __pyx_v_t, __pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3) { + __pyx_t_double_complex __pyx_v__p1; + __pyx_t_double_complex __pyx_v__p2; + __pyx_t_double_complex __pyx_r; + + /* "fontTools/cu2qu/cu2qu.py":273 + * complex: Location of candidate control point on quadratic curve. 
+ * """ + * _p1 = p0 + (p1 - p0) * 1.5 # <<<<<<<<<<<<<< + * _p2 = p3 + (p2 - p3) * 1.5 + * return _p1 + (_p2 - _p1) * t +*/ + __pyx_v__p1 = __Pyx_c_sum_double(__pyx_v_p0, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_p1, __pyx_v_p0), __pyx_t_double_complex_from_parts(1.5, 0))); + + /* "fontTools/cu2qu/cu2qu.py":274 + * """ + * _p1 = p0 + (p1 - p0) * 1.5 + * _p2 = p3 + (p2 - p3) * 1.5 # <<<<<<<<<<<<<< + * return _p1 + (_p2 - _p1) * t + * +*/ + __pyx_v__p2 = __Pyx_c_sum_double(__pyx_v_p3, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_p2, __pyx_v_p3), __pyx_t_double_complex_from_parts(1.5, 0))); + + /* "fontTools/cu2qu/cu2qu.py":275 + * _p1 = p0 + (p1 - p0) * 1.5 + * _p2 = p3 + (p2 - p3) * 1.5 + * return _p1 + (_p2 - _p1) * t # <<<<<<<<<<<<<< + * + * +*/ + __pyx_r = __Pyx_c_sum_double(__pyx_v__p1, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v__p2, __pyx_v__p1), __pyx_t_double_complex_from_parts(__pyx_v_t, 0))); + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":249 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.complex) +*/ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":278 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.complex) +*/ + +static CYTHON_INLINE __pyx_t_double_complex __pyx_f_9fontTools_5cu2qu_5cu2qu_calc_intersect(__pyx_t_double_complex __pyx_v_a, __pyx_t_double_complex __pyx_v_b, __pyx_t_double_complex __pyx_v_c, __pyx_t_double_complex __pyx_v_d) { + __pyx_t_double_complex __pyx_v_ab; + __pyx_t_double_complex __pyx_v_cd; + __pyx_t_double_complex __pyx_v_p; + double __pyx_v_h; + __pyx_t_double_complex __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + double __pyx_t_5; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + int __pyx_t_10; + int __pyx_t_11; + PyObject *__pyx_t_12 = NULL; + PyObject *__pyx_t_13 = NULL; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + size_t __pyx_t_16; + __pyx_t_double_complex __pyx_t_17; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("calc_intersect", 0); + + /* "fontTools/cu2qu/cu2qu.py":296 + * if no intersection was found. 
+ * """ + * ab = b - a # <<<<<<<<<<<<<< + * cd = d - c + * p = ab * 1j +*/ + __pyx_v_ab = __Pyx_c_diff_double(__pyx_v_b, __pyx_v_a); + + /* "fontTools/cu2qu/cu2qu.py":297 + * """ + * ab = b - a + * cd = d - c # <<<<<<<<<<<<<< + * p = ab * 1j + * try: +*/ + __pyx_v_cd = __Pyx_c_diff_double(__pyx_v_d, __pyx_v_c); + + /* "fontTools/cu2qu/cu2qu.py":298 + * ab = b - a + * cd = d - c + * p = ab * 1j # <<<<<<<<<<<<<< + * try: + * h = dot(p, a - c) / dot(p, cd) +*/ + __pyx_v_p = __Pyx_c_prod_double(__pyx_v_ab, __pyx_t_double_complex_from_parts(0, 1.0)); + + /* "fontTools/cu2qu/cu2qu.py":299 + * cd = d - c + * p = ab * 1j + * try: # <<<<<<<<<<<<<< + * h = dot(p, a - c) / dot(p, cd) + * except ZeroDivisionError: +*/ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "fontTools/cu2qu/cu2qu.py":300 + * p = ab * 1j + * try: + * h = dot(p, a - c) / dot(p, cd) # <<<<<<<<<<<<<< + * except ZeroDivisionError: + * # if 3 or 4 points are equal, we do have an intersection despite the zero-div: +*/ + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_dot(__pyx_v_p, __Pyx_c_diff_double(__pyx_v_a, __pyx_v_c)); if (unlikely(__pyx_t_4 == ((double)-1) && PyErr_Occurred())) __PYX_ERR(0, 300, __pyx_L3_error) + __pyx_t_5 = __pyx_f_9fontTools_5cu2qu_5cu2qu_dot(__pyx_v_p, __pyx_v_cd); if (unlikely(__pyx_t_5 == ((double)-1) && PyErr_Occurred())) __PYX_ERR(0, 300, __pyx_L3_error) + if (unlikely(__pyx_t_5 == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 300, __pyx_L3_error) + } + __pyx_v_h = (__pyx_t_4 / __pyx_t_5); + + /* "fontTools/cu2qu/cu2qu.py":299 + * cd = d - c + * p = ab * 1j + * try: # <<<<<<<<<<<<<< + * h = dot(p, a - c) / dot(p, cd) + * except ZeroDivisionError: +*/ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "fontTools/cu2qu/cu2qu.py":301 + * try: + * h = dot(p, a - c) / dot(p, cd) + * except ZeroDivisionError: # <<<<<<<<<<<<<< + * # if 3 or 4 points are equal, we do have an intersection despite the zero-div: + * # return one of the off-curves so that the algorithm can attempt a one-curve +*/ + __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(((PyTypeObject*)PyExc_ZeroDivisionError)))); + if (__pyx_t_6) { + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.calc_intersect", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0) __PYX_ERR(0, 301, __pyx_L5_except_error) + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + + /* "fontTools/cu2qu/cu2qu.py":306 + * # solution if it's within tolerance: + * # https://github.com/linebender/kurbo/pull/484 + * if b == c and (a == b or c == d): # <<<<<<<<<<<<<< + * return b + * return complex(NAN, NAN) +*/ + __pyx_t_11 = (__Pyx_c_eq_double(__pyx_v_b, __pyx_v_c)); + if (__pyx_t_11) { + } else { + __pyx_t_10 = __pyx_t_11; + goto __pyx_L12_bool_binop_done; + } + __pyx_t_11 = (__Pyx_c_eq_double(__pyx_v_a, __pyx_v_b)); + if (!__pyx_t_11) { + } else { + __pyx_t_10 = __pyx_t_11; + goto __pyx_L12_bool_binop_done; + } + __pyx_t_11 = (__Pyx_c_eq_double(__pyx_v_c, __pyx_v_d)); + __pyx_t_10 = __pyx_t_11; + __pyx_L12_bool_binop_done:; + if (__pyx_t_10) { + + /* "fontTools/cu2qu/cu2qu.py":307 + * # https://github.com/linebender/kurbo/pull/484 + * if b == c and (a 
== b or c == d): + * return b # <<<<<<<<<<<<<< + * return complex(NAN, NAN) + * return c + cd * h +*/ + __pyx_r = __pyx_v_b; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L6_except_return; + + /* "fontTools/cu2qu/cu2qu.py":306 + * # solution if it's within tolerance: + * # https://github.com/linebender/kurbo/pull/484 + * if b == c and (a == b or c == d): # <<<<<<<<<<<<<< + * return b + * return complex(NAN, NAN) +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":308 + * if b == c and (a == b or c == d): + * return b + * return complex(NAN, NAN) # <<<<<<<<<<<<<< + * return c + cd * h + * +*/ + __pyx_t_13 = NULL; + __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_NAN); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 308, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_14); + __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_mstate_global->__pyx_n_u_NAN); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 308, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_15); + __pyx_t_16 = 1; + { + PyObject *__pyx_callargs[3] = {__pyx_t_13, __pyx_t_14, __pyx_t_15}; + __pyx_t_12 = __Pyx_PyObject_FastCall((PyObject*)(&PyComplex_Type), __pyx_callargs+__pyx_t_16, (3-__pyx_t_16) | (__pyx_t_16*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 308, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_12); + } + __pyx_t_17 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_12); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 308, __pyx_L5_except_error) + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __pyx_r = __pyx_t_17; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L6_except_return; + } + goto __pyx_L5_except_error; + + /* "fontTools/cu2qu/cu2qu.py":299 + * cd = d - c + * p = ab * 1j + * try: # <<<<<<<<<<<<<< + * h = dot(p, a - c) / dot(p, cd) + * except ZeroDivisionError: +*/ + __pyx_L5_except_error:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L0; + __pyx_L8_try_end:; + } + + /* "fontTools/cu2qu/cu2qu.py":309 + * return b + * return complex(NAN, NAN) + * return c + cd * h # <<<<<<<<<<<<<< + * + * +*/ + __pyx_r = __Pyx_c_sum_double(__pyx_v_c, __Pyx_c_prod_double(__pyx_v_cd, __pyx_t_double_complex_from_parts(__pyx_v_h, 0))); + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":278 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.returns(cython.complex) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_12); + __Pyx_XDECREF(__pyx_t_13); + __Pyx_XDECREF(__pyx_t_14); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.calc_intersect", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = __pyx_t_double_complex_from_parts(0, 0); + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":312 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.returns(cython.int) + * @cython.locals( +*/ + +static int 
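+/* [Editor's note -- commentary added for readability, not Cython output.
+ * cubic_farthest_fit_inside() receives an *error* cubic (the difference of
+ * two curves, with endpoints expected to be within tolerance already) and
+ * decides whether the whole curve stays within `tolerance` of the origin.
+ * If both off-curve points p1 and p2 lie inside the tolerance circle, the
+ * convex-hull property guarantees the curve does too; otherwise it checks
+ * the on-curve midpoint and recurses on both halves using the same
+ * mid/deriv3 split as split_cubic_into_two().] */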
__pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_t_double_complex __pyx_v_p0, __pyx_t_double_complex __pyx_v_p1, __pyx_t_double_complex __pyx_v_p2, __pyx_t_double_complex __pyx_v_p3, double __pyx_v_tolerance) { + __pyx_t_double_complex __pyx_v_mid; + __pyx_t_double_complex __pyx_v_deriv3; + int __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "fontTools/cu2qu/cu2qu.py":341 + * """ + * # First check p2 then p1, as p2 has higher error early on. + * if abs(p2) <= tolerance and abs(p1) <= tolerance: # <<<<<<<<<<<<<< + * return True + * +*/ + __pyx_t_2 = (__Pyx_c_abs_double(__pyx_v_p2) <= __pyx_v_tolerance); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__Pyx_c_abs_double(__pyx_v_p1) <= __pyx_v_tolerance); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":342 + * # First check p2 then p1, as p2 has higher error early on. + * if abs(p2) <= tolerance and abs(p1) <= tolerance: + * return True # <<<<<<<<<<<<<< + * + * # Split. +*/ + __pyx_r = 1; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":341 + * """ + * # First check p2 then p1, as p2 has higher error early on. + * if abs(p2) <= tolerance and abs(p1) <= tolerance: # <<<<<<<<<<<<<< + * return True + * +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":345 + * + * # Split. + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 # <<<<<<<<<<<<<< + * if abs(mid) > tolerance: + * return False +*/ + __pyx_v_mid = __Pyx_c_prod_double(__Pyx_c_sum_double(__Pyx_c_sum_double(__pyx_v_p0, __Pyx_c_prod_double(__pyx_t_double_complex_from_parts(3, 0), __Pyx_c_sum_double(__pyx_v_p1, __pyx_v_p2))), __pyx_v_p3), __pyx_t_double_complex_from_parts(0.125, 0)); + + /* "fontTools/cu2qu/cu2qu.py":346 + * # Split. + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + * if abs(mid) > tolerance: # <<<<<<<<<<<<<< + * return False + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 +*/ + __pyx_t_1 = (__Pyx_c_abs_double(__pyx_v_mid) > __pyx_v_tolerance); + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":347 + * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + * if abs(mid) > tolerance: + * return False # <<<<<<<<<<<<<< + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return cubic_farthest_fit_inside( +*/ + __pyx_r = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":346 + * # Split. 
+ * mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + * if abs(mid) > tolerance: # <<<<<<<<<<<<<< + * return False + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":348 + * if abs(mid) > tolerance: + * return False + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 # <<<<<<<<<<<<<< + * return cubic_farthest_fit_inside( + * p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance +*/ + __pyx_v_deriv3 = __Pyx_c_prod_double(__Pyx_c_diff_double(__Pyx_c_diff_double(__Pyx_c_sum_double(__pyx_v_p3, __pyx_v_p2), __pyx_v_p1), __pyx_v_p0), __pyx_t_double_complex_from_parts(0.125, 0)); + + /* "fontTools/cu2qu/cu2qu.py":349 + * return False + * deriv3 = (p3 + p2 - p1 - p0) * 0.125 + * return cubic_farthest_fit_inside( # <<<<<<<<<<<<<< + * p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance + * ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance) +*/ + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_v_p0, __Pyx_c_prod_double(__Pyx_c_sum_double(__pyx_v_p0, __pyx_v_p1), __pyx_t_double_complex_from_parts(0.5, 0)), __Pyx_c_diff_double(__pyx_v_mid, __pyx_v_deriv3), __pyx_v_mid, __pyx_v_tolerance); if (unlikely(__pyx_t_4 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 349, __pyx_L1_error) + if (__pyx_t_4) { + } else { + __pyx_t_3 = __pyx_t_4; + goto __pyx_L7_bool_binop_done; + } + + /* "fontTools/cu2qu/cu2qu.py":351 + * return cubic_farthest_fit_inside( + * p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance + * ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance) # <<<<<<<<<<<<<< + * + * +*/ + __pyx_t_4 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_v_mid, __Pyx_c_sum_double(__pyx_v_mid, __pyx_v_deriv3), __Pyx_c_prod_double(__Pyx_c_sum_double(__pyx_v_p2, __pyx_v_p3), __pyx_t_double_complex_from_parts(0.5, 0)), __pyx_v_p3, __pyx_v_tolerance); if (unlikely(__pyx_t_4 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 351, __pyx_L1_error) + __pyx_t_3 = __pyx_t_4; + __pyx_L7_bool_binop_done:; + __pyx_r = __pyx_t_3; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":312 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.returns(cython.int) + * @cython.locals( +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.cubic_farthest_fit_inside", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":354 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals(tolerance=cython.double) +*/ + +static CYTHON_INLINE PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_quadratic(PyObject *__pyx_v_cubic, double __pyx_v_tolerance) { + __pyx_t_double_complex __pyx_v_q1; + __pyx_t_double_complex __pyx_v_c0; + __pyx_t_double_complex __pyx_v_c1; + __pyx_t_double_complex __pyx_v_c2; + __pyx_t_double_complex __pyx_v_c3; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __pyx_t_double_complex __pyx_t_2; + __pyx_t_double_complex __pyx_t_3; + __pyx_t_double_complex __pyx_t_4; + __pyx_t_double_complex __pyx_t_5; + __pyx_t_double_complex __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + size_t __pyx_t_10; + int __pyx_t_11; + int __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cubic_approx_quadratic", 0); + + /* "fontTools/cu2qu/cu2qu.py":378 + * """ + * + * q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) # 
<<<<<<<<<<<<<< + * if math.isnan(q1.imag): + * return None +*/ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 378, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_6 = __pyx_f_9fontTools_5cu2qu_5cu2qu_calc_intersect(__pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 378, __pyx_L1_error) + __pyx_v_q1 = __pyx_t_6; + + /* "fontTools/cu2qu/cu2qu.py":379 + * + * q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) + * if math.isnan(q1.imag): # <<<<<<<<<<<<<< + * return None + * c0 = cubic[0] +*/ + __pyx_t_7 = NULL; + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_math); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 379, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_mstate_global->__pyx_n_u_isnan); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 379, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = PyFloat_FromDouble(__Pyx_CIMAG(__pyx_v_q1)); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 379, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_10 = 1; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_9))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_9); + assert(__pyx_t_7); + PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_9); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(__pyx__function); + __Pyx_DECREF_SET(__pyx_t_9, __pyx__function); + __pyx_t_10 = 0; + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_8}; + __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_9, __pyx_callargs+__pyx_t_10, (2-__pyx_t_10) | (__pyx_t_10*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 379, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + } + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_11 < 0))) __PYX_ERR(0, 379, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if 
(__pyx_t_11) { + + /* "fontTools/cu2qu/cu2qu.py":380 + * q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) + * if math.isnan(q1.imag): + * return None # <<<<<<<<<<<<<< + * c0 = cubic[0] + * c3 = cubic[3] +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":379 + * + * q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) + * if math.isnan(q1.imag): # <<<<<<<<<<<<<< + * return None + * c0 = cubic[0] +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":381 + * if math.isnan(q1.imag): + * return None + * c0 = cubic[0] # <<<<<<<<<<<<<< + * c3 = cubic[3] + * c1 = c0 + (q1 - c0) * (2 / 3) +*/ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 381, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 381, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_c0 = __pyx_t_6; + + /* "fontTools/cu2qu/cu2qu.py":382 + * return None + * c0 = cubic[0] + * c3 = cubic[3] # <<<<<<<<<<<<<< + * c1 = c0 + (q1 - c0) * (2 / 3) + * c2 = c3 + (q1 - c3) * (2 / 3) +*/ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 382, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 382, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_c3 = __pyx_t_6; + + /* "fontTools/cu2qu/cu2qu.py":383 + * c0 = cubic[0] + * c3 = cubic[3] + * c1 = c0 + (q1 - c0) * (2 / 3) # <<<<<<<<<<<<<< + * c2 = c3 + (q1 - c3) * (2 / 3) + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): +*/ + __pyx_v_c1 = __Pyx_c_sum_double(__pyx_v_c0, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_q1, __pyx_v_c0), __pyx_t_double_complex_from_parts((2.0 / 3.0), 0))); + + /* "fontTools/cu2qu/cu2qu.py":384 + * c3 = cubic[3] + * c1 = c0 + (q1 - c0) * (2 / 3) + * c2 = c3 + (q1 - c3) * (2 / 3) # <<<<<<<<<<<<<< + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): + * return None +*/ + __pyx_v_c2 = __Pyx_c_sum_double(__pyx_v_c3, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_q1, __pyx_v_c3), __pyx_t_double_complex_from_parts((2.0 / 3.0), 0))); + + /* "fontTools/cu2qu/cu2qu.py":385 + * c1 = c0 + (q1 - c0) * (2 / 3) + * c2 = c3 + (q1 - c3) * (2 / 3) + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): # <<<<<<<<<<<<<< + * return None + * return c0, q1, c3 +*/ + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_c1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_cubic, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_8 = PyNumber_Subtract(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = 
__pyx_PyComplex_FromComplex(__pyx_v_c2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_cubic, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = PyNumber_Subtract(__pyx_t_8, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 385, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_12 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_t_double_complex_from_parts(0, 0), __pyx_t_6, __pyx_t_5, __pyx_t_double_complex_from_parts(0, 0), __pyx_v_tolerance); if (unlikely(__pyx_t_12 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 385, __pyx_L1_error) + __pyx_t_11 = (!(__pyx_t_12 != 0)); + if (__pyx_t_11) { + + /* "fontTools/cu2qu/cu2qu.py":386 + * c2 = c3 + (q1 - c3) * (2 / 3) + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): + * return None # <<<<<<<<<<<<<< + * return c0, q1, c3 + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":385 + * c1 = c0 + (q1 - c0) * (2 / 3) + * c2 = c3 + (q1 - c3) * (2 / 3) + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): # <<<<<<<<<<<<<< + * return None + * return c0, q1, c3 +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":387 + * if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): + * return None + * return c0, q1, c3 # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_PyComplex_FromComplex(__pyx_v_c0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 387, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_9 = __pyx_PyComplex_FromComplex(__pyx_v_q1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 387, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_8 = __pyx_PyComplex_FromComplex(__pyx_v_c3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 387, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 387, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 387, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_9); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 387, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_8); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_8) != (0)) __PYX_ERR(0, 387, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_9 = 0; + __pyx_t_8 = 0; + __pyx_r = __pyx_t_7; + __pyx_t_7 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":354 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.inline + * @cython.locals(tolerance=cython.double) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.cubic_approx_quadratic", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":390 + * + * + * @cython.cfunc # <<<<<<<<<<<<<< + * @cython.locals(n=cython.int, 
tolerance=cython.double) + * @cython.locals(i=cython.int) +*/ + +static PyObject *__pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_spline(PyObject *__pyx_v_cubic, int __pyx_v_n, double __pyx_v_tolerance, int __pyx_v_all_quadratic) { + __pyx_t_double_complex __pyx_v_q0; + __pyx_t_double_complex __pyx_v_q1; + __pyx_t_double_complex __pyx_v_next_q1; + __pyx_t_double_complex __pyx_v_q2; + __pyx_t_double_complex __pyx_v_d1; + CYTHON_UNUSED __pyx_t_double_complex __pyx_v_c0; + __pyx_t_double_complex __pyx_v_c1; + __pyx_t_double_complex __pyx_v_c2; + __pyx_t_double_complex __pyx_v_c3; + int __pyx_v_i; + PyObject *__pyx_v_cubics = NULL; + PyObject *__pyx_v_next_cubic = NULL; + PyObject *__pyx_v_spline = NULL; + __pyx_t_double_complex __pyx_v_d0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_t_3; + __pyx_t_double_complex __pyx_t_4; + __pyx_t_double_complex __pyx_t_5; + __pyx_t_double_complex __pyx_t_6; + __pyx_t_double_complex __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + __pyx_t_double_complex __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + long __pyx_t_11; + long __pyx_t_12; + int __pyx_t_13; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + PyObject *(*__pyx_t_16)(PyObject *); + long __pyx_t_17; + int __pyx_t_18; + int __pyx_t_19; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cubic_approx_spline", 0); + + /* "fontTools/cu2qu/cu2qu.py":419 + * """ + * + * if n == 1: # <<<<<<<<<<<<<< + * return cubic_approx_quadratic(cubic, tolerance) + * if n == 2 and all_quadratic == False: +*/ + __pyx_t_1 = (__pyx_v_n == 1); + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":420 + * + * if n == 1: + * return cubic_approx_quadratic(cubic, tolerance) # <<<<<<<<<<<<<< + * if n == 2 and all_quadratic == False: + * return cubic +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_quadratic(__pyx_v_cubic, __pyx_v_tolerance); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":419 + * """ + * + * if n == 1: # <<<<<<<<<<<<<< + * return cubic_approx_quadratic(cubic, tolerance) + * if n == 2 and all_quadratic == False: +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":421 + * if n == 1: + * return cubic_approx_quadratic(cubic, tolerance) + * if n == 2 and all_quadratic == False: # <<<<<<<<<<<<<< + * return cubic + * +*/ + __pyx_t_3 = (__pyx_v_n == 2); + if (__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_all_quadratic == 0); + __pyx_t_1 = __pyx_t_3; + __pyx_L5_bool_binop_done:; + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":422 + * return cubic_approx_quadratic(cubic, tolerance) + * if n == 2 and all_quadratic == False: + * return cubic # <<<<<<<<<<<<<< + * + * cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) +*/ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_cubic); + __pyx_r = __pyx_v_cubic; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":421 + * if n == 1: + * return cubic_approx_quadratic(cubic, tolerance) + * if n == 2 and all_quadratic == False: # <<<<<<<<<<<<<< + * return cubic + * +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":424 + * return cubic + * + * cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) # <<<<<<<<<<<<<< + * + * # calculate the spline of quadratics and check errors at the same time. 
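+ *
+ * [Editor's summary, not part of the quoted source: cubic_approx_spline()
+ *  handles n == 1 by delegating to cubic_approx_quadratic(), which elevates
+ *  the single candidate control point back to a cubic (c1 = c0 +
+ *  (q1 - c0) * 2/3, and symmetrically c2) and error-checks it with
+ *  cubic_farthest_fit_inside(). For larger n it splits the cubic into n
+ *  pieces, derives one quadratic control point per piece via
+ *  cubic_approx_control(), and verifies each piece's error as it goes;
+ *  any failure returns None so the caller can retry with a larger n.]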
+*/ + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cubic, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cubic, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_v_n); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_8 = __pyx_f_9fontTools_5cu2qu_5cu2qu_split_cubic_into_n_iter(__pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_cubics = __pyx_t_8; + __pyx_t_8 = 0; + + /* "fontTools/cu2qu/cu2qu.py":427 + * + * # calculate the spline of quadratics and check errors at the same time. 
+ * next_cubic = next(cubics) # <<<<<<<<<<<<<< + * next_q1 = cubic_approx_control( + * 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] +*/ + __pyx_t_8 = __Pyx_PyIter_Next(__pyx_v_cubics); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_v_next_cubic = __pyx_t_8; + __pyx_t_8 = 0; + + /* "fontTools/cu2qu/cu2qu.py":429 + * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( + * 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] # <<<<<<<<<<<<<< + * ) + * q2 = cubic[0] +*/ + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_next_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_7 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_next_cubic, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_next_cubic, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_next_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "fontTools/cu2qu/cu2qu.py":428 + * # calculate the spline of quadratics and check errors at the same time. 
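+ *
+ * "Check errors at the same time" works by degree elevation: a quadratic
+ * (q0, q1, q2) rewritten as a cubic has its inner control points two
+ * thirds of the way toward q1, which is where the (2 / 3) factors in the
+ * generated expressions below come from. The difference between that
+ * elevated cubic and the original sub-cubic is itself a cubic, and a
+ * segment is accepted only if this error curve fits inside the tolerance
+ * disk (the cubic_farthest_fit_inside call). A sketch, as a hypothetical
+ * helper, of the four error control points that get tested:
+ *
+ *     def error_cubic(q0, q1, q2, c0, c1, c2, c3):
+ *         # (elevated quadratic) minus (original cubic), pointwise
+ *         return (q0 - c0,
+ *                 q0 + (q1 - q0) * (2 / 3) - c1,
+ *                 q2 + (q1 - q2) * (2 / 3) - c2,
+ *                 q2 - c3)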
+ * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( # <<<<<<<<<<<<<< + * 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + * ) +*/ + __pyx_t_9 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_control(0.0, __pyx_t_7, __pyx_t_6, __pyx_t_5, __pyx_t_4); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 428, __pyx_L1_error) + __pyx_v_next_q1 = __pyx_t_9; + + /* "fontTools/cu2qu/cu2qu.py":431 + * 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + * ) + * q2 = cubic[0] # <<<<<<<<<<<<<< + * d1 = 0j + * spline = [cubic[0], next_q1] +*/ + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 431, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 431, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_v_q2 = __pyx_t_9; + + /* "fontTools/cu2qu/cu2qu.py":432 + * ) + * q2 = cubic[0] + * d1 = 0j # <<<<<<<<<<<<<< + * spline = [cubic[0], next_q1] + * for i in range(1, n + 1): +*/ + __pyx_v_d1 = __pyx_t_double_complex_from_parts(0, 0.0); + + /* "fontTools/cu2qu/cu2qu.py":433 + * q2 = cubic[0] + * d1 = 0j + * spline = [cubic[0], next_q1] # <<<<<<<<<<<<<< + * for i in range(1, n + 1): + * # Current cubic to convert +*/ + __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 433, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_2 = __pyx_PyComplex_FromComplex(__pyx_v_next_q1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 433, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_10 = PyList_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 433, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_8); + if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 433, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 433, __pyx_L1_error); + __pyx_t_8 = 0; + __pyx_t_2 = 0; + __pyx_v_spline = ((PyObject*)__pyx_t_10); + __pyx_t_10 = 0; + + /* "fontTools/cu2qu/cu2qu.py":434 + * d1 = 0j + * spline = [cubic[0], next_q1] + * for i in range(1, n + 1): # <<<<<<<<<<<<<< + * # Current cubic to convert + * c0, c1, c2, c3 = next_cubic +*/ + __pyx_t_11 = (__pyx_v_n + 1); + __pyx_t_12 = __pyx_t_11; + for (__pyx_t_13 = 1; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_i = __pyx_t_13; + + /* "fontTools/cu2qu/cu2qu.py":436 + * for i in range(1, n + 1): + * # Current cubic to convert + * c0, c1, c2, c3 = next_cubic # <<<<<<<<<<<<<< + * + * # Current quadratic approximation of current cubic +*/ + if ((likely(PyTuple_CheckExact(__pyx_v_next_cubic))) || (PyList_CheckExact(__pyx_v_next_cubic))) { + PyObject* sequence = __pyx_v_next_cubic; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 4)) { + if (size > 4) __Pyx_RaiseTooManyValuesError(4); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 436, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_10 = PyTuple_GET_ITEM(sequence, 0); + __Pyx_INCREF(__pyx_t_10); + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_2); + __pyx_t_8 = PyTuple_GET_ITEM(sequence, 2); + __Pyx_INCREF(__pyx_t_8); + __pyx_t_14 = PyTuple_GET_ITEM(sequence, 3); + 
__Pyx_INCREF(__pyx_t_14); + } else { + __pyx_t_10 = __Pyx_PyList_GetItemRefFast(sequence, 0, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_10); + __pyx_t_2 = __Pyx_PyList_GetItemRefFast(sequence, 1, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_2); + __pyx_t_8 = __Pyx_PyList_GetItemRefFast(sequence, 2, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_8); + __pyx_t_14 = __Pyx_PyList_GetItemRefFast(sequence, 3, __Pyx_ReferenceSharing_SharedReference); + if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_XGOTREF(__pyx_t_14); + } + #else + { + Py_ssize_t i; + PyObject** temps[4] = {&__pyx_t_10,&__pyx_t_2,&__pyx_t_8,&__pyx_t_14}; + for (i=0; i < 4; i++) { + PyObject* item = __Pyx_PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_GOTREF(item); + *(temps[i]) = item; + } + } + #endif + } else { + Py_ssize_t index = -1; + PyObject** temps[4] = {&__pyx_t_10,&__pyx_t_2,&__pyx_t_8,&__pyx_t_14}; + __pyx_t_15 = PyObject_GetIter(__pyx_v_next_cubic); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __pyx_t_16 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_15); + for (index=0; index < 4; index++) { + PyObject* item = __pyx_t_16(__pyx_t_15); if (unlikely(!item)) goto __pyx_L9_unpacking_failed; + __Pyx_GOTREF(item); + *(temps[index]) = item; + } + if (__Pyx_IternextUnpackEndCheck(__pyx_t_16(__pyx_t_15), 4) < (0)) __PYX_ERR(0, 436, __pyx_L1_error) + __pyx_t_16 = NULL; + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + goto __pyx_L10_unpacking_done; + __pyx_L9_unpacking_failed:; + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __pyx_t_16 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 436, __pyx_L1_error) + __pyx_L10_unpacking_done:; + } + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_10); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_8); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_14); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 436, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __pyx_v_c0 = __pyx_t_9; + __pyx_v_c1 = __pyx_t_4; + __pyx_v_c2 = __pyx_t_5; + __pyx_v_c3 = __pyx_t_6; + + /* "fontTools/cu2qu/cu2qu.py":439 + * + * # Current quadratic approximation of current cubic + * q0 = q2 # <<<<<<<<<<<<<< + * q1 = next_q1 + * if i < n: +*/ + __pyx_v_q0 = __pyx_v_q2; + + /* "fontTools/cu2qu/cu2qu.py":440 + * # Current quadratic approximation of current cubic + * q0 = q2 + * q1 = next_q1 # <<<<<<<<<<<<<< + * if i < n: + * next_cubic = next(cubics) +*/ + __pyx_v_q1 = __pyx_v_next_q1; + + /* "fontTools/cu2qu/cu2qu.py":441 + * q0 = q2 + * q1 = next_q1 + * if i < n: # <<<<<<<<<<<<<< + * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( +*/ + __pyx_t_1 = (__pyx_v_i < __pyx_v_n); + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":442 
+ * q1 = next_q1 + * if i < n: + * next_cubic = next(cubics) # <<<<<<<<<<<<<< + * next_q1 = cubic_approx_control( + * i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] +*/ + __pyx_t_14 = __Pyx_PyIter_Next(__pyx_v_cubics); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 442, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __Pyx_DECREF_SET(__pyx_v_next_cubic, __pyx_t_14); + __pyx_t_14 = 0; + + /* "fontTools/cu2qu/cu2qu.py":444 + * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( + * i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] # <<<<<<<<<<<<<< + * ) + * spline.append(next_q1) +*/ + __pyx_t_17 = (__pyx_v_n - 1); + if (unlikely(__pyx_t_17 == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 444, __pyx_L1_error) + } + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_next_cubic, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_6 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_14); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_next_cubic, 1, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_5 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_14); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_next_cubic, 2, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_4 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_14); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_next_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_9 = __Pyx_PyComplex_As___pyx_t_double_complex(__pyx_t_14); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 444, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + + /* "fontTools/cu2qu/cu2qu.py":443 + * if i < n: + * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( # <<<<<<<<<<<<<< + * i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + * ) +*/ + __pyx_t_7 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_control((((double)__pyx_v_i) / ((double)__pyx_t_17)), __pyx_t_6, __pyx_t_5, __pyx_t_4, __pyx_t_9); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 443, __pyx_L1_error) + __pyx_v_next_q1 = __pyx_t_7; + + /* "fontTools/cu2qu/cu2qu.py":446 + * i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + * ) + * spline.append(next_q1) # <<<<<<<<<<<<<< + * q2 = (q1 + next_q1) * 0.5 + * else: +*/ + __pyx_t_14 = __pyx_PyComplex_FromComplex(__pyx_v_next_q1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 446, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_18 = __Pyx_PyList_Append(__pyx_v_spline, __pyx_t_14); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 446, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + + /* "fontTools/cu2qu/cu2qu.py":447 + * ) + * spline.append(next_q1) + * q2 = (q1 
+ next_q1) * 0.5 # <<<<<<<<<<<<<< + * else: + * q2 = c3 +*/ + __pyx_v_q2 = __Pyx_c_prod_double(__Pyx_c_sum_double(__pyx_v_q1, __pyx_v_next_q1), __pyx_t_double_complex_from_parts(0.5, 0)); + + /* "fontTools/cu2qu/cu2qu.py":441 + * q0 = q2 + * q1 = next_q1 + * if i < n: # <<<<<<<<<<<<<< + * next_cubic = next(cubics) + * next_q1 = cubic_approx_control( +*/ + goto __pyx_L11; + } + + /* "fontTools/cu2qu/cu2qu.py":449 + * q2 = (q1 + next_q1) * 0.5 + * else: + * q2 = c3 # <<<<<<<<<<<<<< + * + * # End-point deltas +*/ + /*else*/ { + __pyx_v_q2 = __pyx_v_c3; + } + __pyx_L11:; + + /* "fontTools/cu2qu/cu2qu.py":452 + * + * # End-point deltas + * d0 = d1 # <<<<<<<<<<<<<< + * d1 = q2 - c3 + * +*/ + __pyx_v_d0 = __pyx_v_d1; + + /* "fontTools/cu2qu/cu2qu.py":453 + * # End-point deltas + * d0 = d1 + * d1 = q2 - c3 # <<<<<<<<<<<<<< + * + * if abs(d1) > tolerance or not cubic_farthest_fit_inside( +*/ + __pyx_v_d1 = __Pyx_c_diff_double(__pyx_v_q2, __pyx_v_c3); + + /* "fontTools/cu2qu/cu2qu.py":455 + * d1 = q2 - c3 + * + * if abs(d1) > tolerance or not cubic_farthest_fit_inside( # <<<<<<<<<<<<<< + * d0, + * q0 + (q1 - q0) * (2 / 3) - c1, +*/ + __pyx_t_3 = (__Pyx_c_abs_double(__pyx_v_d1) > __pyx_v_tolerance); + if (!__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L13_bool_binop_done; + } + + /* "fontTools/cu2qu/cu2qu.py":460 + * q2 + (q1 - q2) * (2 / 3) - c2, + * d1, + * tolerance, # <<<<<<<<<<<<<< + * ): + * return None +*/ + __pyx_t_19 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_farthest_fit_inside(__pyx_v_d0, __Pyx_c_diff_double(__Pyx_c_sum_double(__pyx_v_q0, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_q1, __pyx_v_q0), __pyx_t_double_complex_from_parts((2.0 / 3.0), 0))), __pyx_v_c1), __Pyx_c_diff_double(__Pyx_c_sum_double(__pyx_v_q2, __Pyx_c_prod_double(__Pyx_c_diff_double(__pyx_v_q1, __pyx_v_q2), __pyx_t_double_complex_from_parts((2.0 / 3.0), 0))), __pyx_v_c2), __pyx_v_d1, __pyx_v_tolerance); if (unlikely(__pyx_t_19 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 455, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":455 + * d1 = q2 - c3 + * + * if abs(d1) > tolerance or not cubic_farthest_fit_inside( # <<<<<<<<<<<<<< + * d0, + * q0 + (q1 - q0) * (2 / 3) - c1, +*/ + __pyx_t_3 = (!(__pyx_t_19 != 0)); + __pyx_t_1 = __pyx_t_3; + __pyx_L13_bool_binop_done:; + if (__pyx_t_1) { + + /* "fontTools/cu2qu/cu2qu.py":462 + * tolerance, + * ): + * return None # <<<<<<<<<<<<<< + * spline.append(cubic[3]) + * +*/ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":455 + * d1 = q2 - c3 + * + * if abs(d1) > tolerance or not cubic_farthest_fit_inside( # <<<<<<<<<<<<<< + * d0, + * q0 + (q1 - q0) * (2 / 3) - c1, +*/ + } + } + + /* "fontTools/cu2qu/cu2qu.py":463 + * ): + * return None + * spline.append(cubic[3]) # <<<<<<<<<<<<<< + * + * return spline +*/ + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_cubic, 3, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 463, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_18 = __Pyx_PyList_Append(__pyx_v_spline, __pyx_t_14); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 463, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + + /* "fontTools/cu2qu/cu2qu.py":465 + * spline.append(cubic[3]) + * + * return spline # <<<<<<<<<<<<<< + * + * +*/ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_spline); + __pyx_r = __pyx_v_spline; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":390 + * + * + * @cython.cfunc # 
<<<<<<<<<<<<<< + * @cython.locals(n=cython.int, tolerance=cython.double) + * @cython.locals(i=cython.int) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_XDECREF(__pyx_t_14); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.cubic_approx_spline", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_cubics); + __Pyx_XDECREF(__pyx_v_next_cubic); + __Pyx_XDECREF(__pyx_v_spline); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":468 + * + * + * @cython.locals(max_err=cython.double) # <<<<<<<<<<<<<< + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) +*/ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_4curve_to_quadratic(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_5cu2qu_5cu2qu_3curve_to_quadratic, "curve_to_quadratic(curve, double max_err, int all_quadratic=True)\n\nApproximate a cubic Bezier curve with a spline of n quadratics.\n\nArgs:\n cubic (sequence): Four 2D tuples representing control points of\n the cubic Bezier curve.\n max_err (double): Permitted deviation from the original curve.\n all_quadratic (bool): If True (default) returned value is a\n quadratic spline. If False, it's either a single quadratic\n curve or a single cubic curve.\n\nReturns:\n If all_quadratic is True: A list of 2D tuples, representing\n control points of the quadratic spline if it fits within the\n given tolerance, or ``None`` if no suitable spline could be\n calculated.\n\n If all_quadratic is False: Either a quadratic curve (if length\n of output is 3), or a cubic curve (if length of output is 4)."); +static PyMethodDef __pyx_mdef_9fontTools_5cu2qu_5cu2qu_4curve_to_quadratic = {"curve_to_quadratic", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_5cu2qu_5cu2qu_4curve_to_quadratic, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_5cu2qu_5cu2qu_3curve_to_quadratic}; +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_4curve_to_quadratic(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_curve = 0; + double __pyx_v_max_err; + int __pyx_v_all_quadratic; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("curve_to_quadratic (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_SIZE + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_curve,&__pyx_mstate_global->__pyx_n_u_max_err,&__pyx_mstate_global->__pyx_n_u_all_quadratic,0}; + const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; + if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 468, __pyx_L3_error) + if (__pyx_kwds_len > 0) { + switch (__pyx_nargs) { + case 3: + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 468, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 2: + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 468, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 1: + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 468, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "curve_to_quadratic", 0) < (0)) __PYX_ERR(0, 468, __pyx_L3_error) + for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { + if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("curve_to_quadratic", 0, 2, 3, i); __PYX_ERR(0, 468, __pyx_L3_error) } + } + } else { + switch (__pyx_nargs) { + case 3: + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 468, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 2: + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 468, __pyx_L3_error) + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 468, __pyx_L3_error) + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_curve = values[0]; + __pyx_v_max_err = __Pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_max_err == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 471, __pyx_L3_error) + if (values[2]) { + __pyx_v_all_quadratic = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_all_quadratic == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 471, __pyx_L3_error) + } else { + + /* "fontTools/cu2qu/cu2qu.py":471 + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) + * def curve_to_quadratic(curve, max_err, all_quadratic=True): # <<<<<<<<<<<<<< + * """Approximate a cubic Bezier curve with a spline of n quadratics. 
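+ *
+ * A usage sketch for the function being wrapped here (the number of
+ * returned points varies with the curve and the tolerance; if no n up
+ * to MAX_N fits, the code at cu2qu.py:500 raises ApproxNotFoundError):
+ *
+ *     from fontTools.cu2qu import curve_to_quadratic
+ *
+ *     cubic = [(50, 50), (100, 100), (150, 100), (200, 50)]
+ *     spline = curve_to_quadratic(cubic, max_err=1.0)
+ *     # spline == [start, offcurve_1, ..., offcurve_n, end]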
+ * +*/ + __pyx_v_all_quadratic = ((int)((int)1)); + } + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("curve_to_quadratic", 0, 2, 3, __pyx_nargs); __PYX_ERR(0, 468, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.curve_to_quadratic", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_5cu2qu_5cu2qu_3curve_to_quadratic(__pyx_self, __pyx_v_curve, __pyx_v_max_err, __pyx_v_all_quadratic); + + /* "fontTools/cu2qu/cu2qu.py":468 + * + * + * @cython.locals(max_err=cython.double) # <<<<<<<<<<<<<< + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) +*/ + + /* function exit code */ + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu_3curve_to_quadratic(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_curve, double __pyx_v_max_err, int __pyx_v_all_quadratic) { + int __pyx_v_n; + PyObject *__pyx_v_spline = NULL; + PyObject *__pyx_7genexpr__pyx_v_p = NULL; + PyObject *__pyx_8genexpr1__pyx_v_s = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + Py_ssize_t __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + Py_ssize_t __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + size_t __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("curve_to_quadratic", 0); + __Pyx_INCREF(__pyx_v_curve); + + /* "fontTools/cu2qu/cu2qu.py":492 + * """ + * + * curve = [complex(*p) for p in curve] # <<<<<<<<<<<<<< + * + * for n in range(1, MAX_N + 1): +*/ + { /* enter inner scope */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 492, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_1); + if (likely(PyList_CheckExact(__pyx_v_curve)) || PyTuple_CheckExact(__pyx_v_curve)) { + __pyx_t_2 = __pyx_v_curve; __Pyx_INCREF(__pyx_t_2); + __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_curve); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 492, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 492, __pyx_L5_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 492, __pyx_L5_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + __pyx_t_5 = __Pyx_PyList_GetItemRefFast(__pyx_t_2, __pyx_t_3, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_3; + } else { + { + Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 492, __pyx_L5_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3)); + #else + __pyx_t_5 = __Pyx_PySequence_ITEM(__pyx_t_2, __pyx_t_3); + #endif + ++__pyx_t_3; + } + if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 492, __pyx_L5_error) + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 492, __pyx_L5_error) + PyErr_Clear(); + } + break; + } + } + __Pyx_GOTREF(__pyx_t_5); + __Pyx_XDECREF_SET(__pyx_7genexpr__pyx_v_p, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PySequence_Tuple(__pyx_7genexpr__pyx_v_p); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 492, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyObject_Call(((PyObject *)(&PyComplex_Type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 492, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 492, __pyx_L5_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_7genexpr__pyx_v_p); __pyx_7genexpr__pyx_v_p = 0; + goto __pyx_L9_exit_scope; + __pyx_L5_error:; + __Pyx_XDECREF(__pyx_7genexpr__pyx_v_p); __pyx_7genexpr__pyx_v_p = 0; + goto __pyx_L1_error; + __pyx_L9_exit_scope:; + } /* exit inner scope */ + __Pyx_DECREF_SET(__pyx_v_curve, __pyx_t_1); + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":494 + * curve = [complex(*p) for p in curve] + * + * for n in range(1, MAX_N + 1): # <<<<<<<<<<<<<< + * spline = cubic_approx_spline(curve, n, max_err, all_quadratic) + * if spline is not None: +*/ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_MAX_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 494, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyLong_AddObjC(__pyx_t_1, __pyx_mstate_global->__pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 494, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_3 = __Pyx_PyIndex_AsSsize_t(__pyx_t_2); if (unlikely((__pyx_t_3 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 494, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_7 = __pyx_t_3; + for (__pyx_t_8 = 1; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { + __pyx_v_n = __pyx_t_8; + + /* "fontTools/cu2qu/cu2qu.py":495 + * + * for n in range(1, MAX_N + 1): + * spline = cubic_approx_spline(curve, n, max_err, all_quadratic) # <<<<<<<<<<<<<< + * if spline is not None: + * # done. 
go home +*/ + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_spline(__pyx_v_curve, __pyx_v_n, __pyx_v_max_err, __pyx_v_all_quadratic); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_spline, __pyx_t_2); + __pyx_t_2 = 0; + + /* "fontTools/cu2qu/cu2qu.py":496 + * for n in range(1, MAX_N + 1): + * spline = cubic_approx_spline(curve, n, max_err, all_quadratic) + * if spline is not None: # <<<<<<<<<<<<<< + * # done. go home + * return [(s.real, s.imag) for s in spline] +*/ + __pyx_t_9 = (__pyx_v_spline != Py_None); + if (__pyx_t_9) { + + /* "fontTools/cu2qu/cu2qu.py":498 + * if spline is not None: + * # done. go home + * return [(s.real, s.imag) for s in spline] # <<<<<<<<<<<<<< + * + * raise ApproxNotFoundError(curve) +*/ + __Pyx_XDECREF(__pyx_r); + { /* enter inner scope */ + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(PyList_CheckExact(__pyx_v_spline)) || PyTuple_CheckExact(__pyx_v_spline)) { + __pyx_t_1 = __pyx_v_spline; __Pyx_INCREF(__pyx_t_1); + __pyx_t_10 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_10 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_spline); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 498, __pyx_L15_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_1))) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 498, __pyx_L15_error) + #endif + if (__pyx_t_10 >= __pyx_temp) break; + } + __pyx_t_6 = __Pyx_PyList_GetItemRefFast(__pyx_t_1, __pyx_t_10, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_10; + } else { + { + Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 498, __pyx_L15_error) + #endif + if (__pyx_t_10 >= __pyx_temp) break; + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_6 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_10)); + #else + __pyx_t_6 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_10); + #endif + ++__pyx_t_10; + } + if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 498, __pyx_L15_error) + } else { + __pyx_t_6 = __pyx_t_4(__pyx_t_1); + if (unlikely(!__pyx_t_6)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 498, __pyx_L15_error) + PyErr_Clear(); + } + break; + } + } + __Pyx_GOTREF(__pyx_t_6); + __Pyx_XDECREF_SET(__pyx_8genexpr1__pyx_v_s, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_8genexpr1__pyx_v_s, __pyx_mstate_global->__pyx_n_u_real); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_8genexpr1__pyx_v_s, __pyx_mstate_global->__pyx_n_u_imag); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 498, __pyx_L15_error); + __Pyx_GIVEREF(__pyx_t_5); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_5) != 
(0)) __PYX_ERR(0, 498, __pyx_L15_error); + __pyx_t_6 = 0; + __pyx_t_5 = 0; + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_11))) __PYX_ERR(0, 498, __pyx_L15_error) + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_s); __pyx_8genexpr1__pyx_v_s = 0; + goto __pyx_L19_exit_scope; + __pyx_L15_error:; + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_s); __pyx_8genexpr1__pyx_v_s = 0; + goto __pyx_L1_error; + __pyx_L19_exit_scope:; + } /* exit inner scope */ + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":496 + * for n in range(1, MAX_N + 1): + * spline = cubic_approx_spline(curve, n, max_err, all_quadratic) + * if spline is not None: # <<<<<<<<<<<<<< + * # done. go home + * return [(s.real, s.imag) for s in spline] +*/ + } + } + + /* "fontTools/cu2qu/cu2qu.py":500 + * return [(s.real, s.imag) for s in spline] + * + * raise ApproxNotFoundError(curve) # <<<<<<<<<<<<<< + * + * +*/ + __pyx_t_1 = NULL; + __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_mstate_global->__pyx_n_u_ApproxNotFoundError); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 500, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __pyx_t_12 = 1; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_11))) { + __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_11); + assert(__pyx_t_1); + PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_11); + __Pyx_INCREF(__pyx_t_1); + __Pyx_INCREF(__pyx__function); + __Pyx_DECREF_SET(__pyx_t_11, __pyx__function); + __pyx_t_12 = 0; + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_v_curve}; + __pyx_t_2 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_11, __pyx_callargs+__pyx_t_12, (2-__pyx_t_12) | (__pyx_t_12*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 500, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + } + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 500, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":468 + * + * + * @cython.locals(max_err=cython.double) # <<<<<<<<<<<<<< + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.curve_to_quadratic", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_spline); + __Pyx_XDECREF(__pyx_7genexpr__pyx_v_p); + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_s); + __Pyx_XDECREF(__pyx_v_curve); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "fontTools/cu2qu/cu2qu.py":503 + * + * + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) # <<<<<<<<<<<<<< + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): +*/ + +/* Python wrapper */ +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_6curves_to_quadratic(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_9fontTools_5cu2qu_5cu2qu_5curves_to_quadratic, "curves_to_quadratic(curves, max_errors, int all_quadratic=True)\n\nReturn quadratic Bezier splines 
approximating the input cubic Beziers.\n\nArgs:\n curves: A sequence of *n* curves, each curve being a sequence of four\n 2D tuples.\n max_errors: A sequence of *n* floats representing the maximum permissible\n deviation from each of the cubic Bezier curves.\n all_quadratic (bool): If True (default) returned values are a\n quadratic spline. If False, they are either a single quadratic\n curve or a single cubic curve.\n\nExample::\n\n >>> curves_to_quadratic( [\n ... [ (50,50), (100,100), (150,100), (200,50) ],\n ... [ (75,50), (120,100), (150,75), (200,60) ]\n ... ], [1,1] )\n [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]\n\nThe returned splines have \"implied oncurve points\" suitable for use in\nTrueType ``glif`` outlines - i.e. in the first spline returned above,\nthe first quadratic segment runs from (50,50) to\n( (75 + 125)/2 , (75 + 91.666..)/2 ) = (100, 83.333...).\n\nReturns:\n If all_quadratic is True, a list of splines, each spline being a list\n of 2D tuples.\n\n If all_quadratic is False, a list of curves, each curve being a quadratic\n (length 3), or cubic (length 4).\n\nRaises:\n fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation\n can be found for all curves with the given parameters."); +static PyMethodDef __pyx_mdef_9fontTools_5cu2qu_5cu2qu_6curves_to_quadratic = {"curves_to_quadratic", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_9fontTools_5cu2qu_5cu2qu_6curves_to_quadratic, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_9fontTools_5cu2qu_5cu2qu_5curves_to_quadratic}; +static PyObject *__pyx_pw_9fontTools_5cu2qu_5cu2qu_6curves_to_quadratic(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_curves = 0; + PyObject *__pyx_v_max_errors = 0; + int __pyx_v_all_quadratic; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("curves_to_quadratic (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_SIZE + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_curves,&__pyx_mstate_global->__pyx_n_u_max_errors,&__pyx_mstate_global->__pyx_n_u_all_quadratic,0}; + const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ?
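+/* The "implied oncurve points" in the docstring above are the midpoints
+ * between consecutive off-curve controls; for the first spline in the
+ * example, ((75 + 125)/2, (75 + 91.666..)/2) = (100, 83.333...). A
+ * hypothetical helper that recovers them from a returned spline:
+ *
+ *     def implied_oncurves(spline):
+ *         # midpoint of each consecutive pair of off-curve controls
+ *         return [((ax + bx) / 2, (ay + by) / 2)
+ *                 for (ax, ay), (bx, by) in zip(spline[1:-2], spline[2:-1])]
+ */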
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; + if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 503, __pyx_L3_error) + if (__pyx_kwds_len > 0) { + switch (__pyx_nargs) { + case 3: + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 503, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 2: + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 503, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 1: + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 503, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "curves_to_quadratic", 0) < (0)) __PYX_ERR(0, 503, __pyx_L3_error) + for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { + if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("curves_to_quadratic", 0, 2, 3, i); __PYX_ERR(0, 503, __pyx_L3_error) } + } + } else { + switch (__pyx_nargs) { + case 3: + values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 503, __pyx_L3_error) + CYTHON_FALLTHROUGH; + case 2: + values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 503, __pyx_L3_error) + values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); + if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 503, __pyx_L3_error) + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_curves = values[0]; + __pyx_v_max_errors = values[1]; + if (values[2]) { + __pyx_v_all_quadratic = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_all_quadratic == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 505, __pyx_L3_error) + } else { + + /* "fontTools/cu2qu/cu2qu.py":505 + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): # <<<<<<<<<<<<<< + * """Return quadratic Bezier splines approximating the input cubic Beziers. 
+ * +*/ + __pyx_v_all_quadratic = ((int)((int)1)); + } + } + goto __pyx_L6_skip; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("curves_to_quadratic", 0, 2, 3, __pyx_nargs); __PYX_ERR(0, 503, __pyx_L3_error) + __pyx_L6_skip:; + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.curves_to_quadratic", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_9fontTools_5cu2qu_5cu2qu_5curves_to_quadratic(__pyx_self, __pyx_v_curves, __pyx_v_max_errors, __pyx_v_all_quadratic); + + /* "fontTools/cu2qu/cu2qu.py":503 + * + * + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) # <<<<<<<<<<<<<< + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): +*/ + + /* function exit code */ + for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + Py_XDECREF(values[__pyx_temp]); + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_9fontTools_5cu2qu_5cu2qu_5curves_to_quadratic(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_curves, PyObject *__pyx_v_max_errors, int __pyx_v_all_quadratic) { + int __pyx_v_l; + int __pyx_v_last_i; + int __pyx_v_i; + PyObject *__pyx_v_splines = NULL; + PyObject *__pyx_v_n = NULL; + PyObject *__pyx_v_spline = NULL; + PyObject *__pyx_8genexpr2__pyx_v_curve = NULL; + PyObject *__pyx_8genexpr3__pyx_v_p = NULL; + PyObject *__pyx_8genexpr4__pyx_v_spline = NULL; + PyObject *__pyx_8genexpr5__pyx_v_s = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + int __pyx_t_11; + int __pyx_t_12; + double __pyx_t_13; + long __pyx_t_14; + PyObject *__pyx_t_15 = NULL; + size_t __pyx_t_16; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("curves_to_quadratic", 0); + __Pyx_INCREF(__pyx_v_curves); + + /* "fontTools/cu2qu/cu2qu.py":542 + * """ + * + * curves = [[complex(*p) for p in curve] for curve in curves] # <<<<<<<<<<<<<< + * assert len(max_errors) == len(curves) + * +*/ + { /* enter inner scope */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 542, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_1); + if (likely(PyList_CheckExact(__pyx_v_curves)) || PyTuple_CheckExact(__pyx_v_curves)) { + __pyx_t_2 = __pyx_v_curves; __Pyx_INCREF(__pyx_t_2); + __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_curves); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 542, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 542, __pyx_L5_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 542, __pyx_L5_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + __pyx_t_5 = __Pyx_PyList_GetItemRefFast(__pyx_t_2, __pyx_t_3, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_3; + } else { + { + Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 542, __pyx_L5_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3)); + #else + __pyx_t_5 = __Pyx_PySequence_ITEM(__pyx_t_2, __pyx_t_3); + #endif + ++__pyx_t_3; + } + if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 542, __pyx_L5_error) + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 542, __pyx_L5_error) + PyErr_Clear(); + } + break; + } + } + __Pyx_GOTREF(__pyx_t_5); + __Pyx_XDECREF_SET(__pyx_8genexpr2__pyx_v_curve, __pyx_t_5); + __pyx_t_5 = 0; + { /* enter inner scope */ + __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 542, __pyx_L10_error) + __Pyx_GOTREF(__pyx_t_5); + if (likely(PyList_CheckExact(__pyx_8genexpr2__pyx_v_curve)) || PyTuple_CheckExact(__pyx_8genexpr2__pyx_v_curve)) { + __pyx_t_6 = __pyx_8genexpr2__pyx_v_curve; __Pyx_INCREF(__pyx_t_6); + __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_8genexpr2__pyx_v_curve); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 542, __pyx_L10_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 542, __pyx_L10_error) + } + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_6))) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_6); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 542, __pyx_L10_error) + #endif + if (__pyx_t_7 >= __pyx_temp) break; + } + __pyx_t_9 = __Pyx_PyList_GetItemRefFast(__pyx_t_6, __pyx_t_7, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_7; + } else { + { + Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_6); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 542, __pyx_L10_error) + #endif + if (__pyx_t_7 >= __pyx_temp) break; + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_7)); + #else + __pyx_t_9 = __Pyx_PySequence_ITEM(__pyx_t_6, __pyx_t_7); + #endif + ++__pyx_t_7; + } + if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 542, __pyx_L10_error) + } else { + __pyx_t_9 = __pyx_t_8(__pyx_t_6); + if (unlikely(!__pyx_t_9)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 542, __pyx_L10_error) + PyErr_Clear(); + } + break; + } + } + __Pyx_GOTREF(__pyx_t_9); + __Pyx_XDECREF_SET(__pyx_8genexpr3__pyx_v_p, __pyx_t_9); + __pyx_t_9 = 0; + __pyx_t_9 = __Pyx_PySequence_Tuple(__pyx_8genexpr3__pyx_v_p); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 542, __pyx_L10_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_10 = __Pyx_PyObject_Call(((PyObject *)(&PyComplex_Type)), __pyx_t_9, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 542, __pyx_L10_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_10))) __PYX_ERR(0, 542, __pyx_L10_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_8genexpr3__pyx_v_p); __pyx_8genexpr3__pyx_v_p = 0; + goto __pyx_L14_exit_scope; + __pyx_L10_error:; + __Pyx_XDECREF(__pyx_8genexpr3__pyx_v_p); __pyx_8genexpr3__pyx_v_p = 0; + goto __pyx_L5_error; + __pyx_L14_exit_scope:; + } /* exit inner scope */ + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 542, __pyx_L5_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_curve); __pyx_8genexpr2__pyx_v_curve = 0; + goto __pyx_L16_exit_scope; + __pyx_L5_error:; + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_curve); __pyx_8genexpr2__pyx_v_curve = 0; + goto __pyx_L1_error; + __pyx_L16_exit_scope:; + } /* exit inner scope */ + __Pyx_DECREF_SET(__pyx_v_curves, __pyx_t_1); + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":543 + * + * curves = [[complex(*p) for p in curve] for curve in curves] + * assert len(max_errors) == len(curves) # <<<<<<<<<<<<<< + * + * l = len(curves) +*/ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(__pyx_assertions_enabled())) { + __pyx_t_3 = PyObject_Length(__pyx_v_max_errors); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(0, 543, __pyx_L1_error) + __pyx_t_7 = PyObject_Length(__pyx_v_curves); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 543, __pyx_L1_error) + __pyx_t_11 = (__pyx_t_3 == __pyx_t_7); + if (unlikely(!__pyx_t_11)) { + __Pyx_Raise(((PyObject *)(((PyTypeObject*)PyExc_AssertionError))), 0, 0, 0); + __PYX_ERR(0, 543, 
__pyx_L1_error) + } + } + #else + if ((1)); else __PYX_ERR(0, 543, __pyx_L1_error) + #endif + + /* "fontTools/cu2qu/cu2qu.py":545 + * assert len(max_errors) == len(curves) + * + * l = len(curves) # <<<<<<<<<<<<<< + * splines = [None] * l + * last_i = i = 0 +*/ + __pyx_t_7 = PyObject_Length(__pyx_v_curves); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 545, __pyx_L1_error) + __pyx_v_l = __pyx_t_7; + + /* "fontTools/cu2qu/cu2qu.py":546 + * + * l = len(curves) + * splines = [None] * l # <<<<<<<<<<<<<< + * last_i = i = 0 + * n = 1 +*/ + __pyx_t_1 = PyList_New(1 * ((__pyx_v_l<0) ? 0:__pyx_v_l)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 546, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_l; __pyx_temp++) { + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, __pyx_temp, Py_None) != (0)) __PYX_ERR(0, 546, __pyx_L1_error); + } + } + __pyx_v_splines = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":547 + * l = len(curves) + * splines = [None] * l + * last_i = i = 0 # <<<<<<<<<<<<<< + * n = 1 + * while True: +*/ + __pyx_v_last_i = 0; + __pyx_v_i = 0; + + /* "fontTools/cu2qu/cu2qu.py":548 + * splines = [None] * l + * last_i = i = 0 + * n = 1 # <<<<<<<<<<<<<< + * while True: + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) +*/ + __Pyx_INCREF(__pyx_mstate_global->__pyx_int_1); + __pyx_v_n = __pyx_mstate_global->__pyx_int_1; + + /* "fontTools/cu2qu/cu2qu.py":549 + * last_i = i = 0 + * n = 1 + * while True: # <<<<<<<<<<<<<< + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + * if spline is None: +*/ + while (1) { + + /* "fontTools/cu2qu/cu2qu.py":550 + * n = 1 + * while True: + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) # <<<<<<<<<<<<<< + * if spline is None: + * if n == MAX_N: +*/ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_curves, __pyx_v_i, int, 1, __Pyx_PyLong_From_int, 0, 1, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 550, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_12 = __Pyx_PyLong_As_int(__pyx_v_n); if (unlikely((__pyx_t_12 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 550, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_max_errors, __pyx_v_i, int, 1, __Pyx_PyLong_From_int, 0, 1, 1, 1, __Pyx_ReferenceSharing_FunctionArgument); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_13 = __Pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 550, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __pyx_f_9fontTools_5cu2qu_5cu2qu_cubic_approx_spline(__pyx_t_1, __pyx_t_12, __pyx_t_13, __pyx_v_all_quadratic); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF_SET(__pyx_v_spline, __pyx_t_2); + __pyx_t_2 = 0; + + /* "fontTools/cu2qu/cu2qu.py":551 + * while True: + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + * if spline is None: # <<<<<<<<<<<<<< + * if n == MAX_N: + * break +*/ + __pyx_t_11 = (__pyx_v_spline == Py_None); + if (__pyx_t_11) { + + /* "fontTools/cu2qu/cu2qu.py":552 + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + * if spline is None: + * if n == MAX_N: # <<<<<<<<<<<<<< + * break + * n += 1 +*/ + __Pyx_GetModuleGlobalName(__pyx_t_2, 
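+/* The surrounding while-loop is a round-robin search for one segment
+ * count n that works for every curve at once, which is what keeps the
+ * returned splines point-compatible for interpolation: any failure bumps
+ * n and records last_i, and the search only ends after a full lap over
+ * all l curves passes at the current n. The quoted source, gathered in
+ * one place:
+ *
+ *     last_i = i = 0
+ *     n = 1
+ *     while True:
+ *         spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
+ *         if spline is None:
+ *             if n == MAX_N:
+ *                 break            # exhausted: ApproxNotFoundError below
+ *             n += 1
+ *             last_i = i           # every curve must re-pass at the new n
+ *             continue
+ *         splines[i] = spline
+ *         i = (i + 1) % l
+ *         if i == last_i:          # one clean lap: every curve fitted
+ *             return ...           # convert back to (x, y) tuples
+ */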
__pyx_mstate_global->__pyx_n_u_MAX_N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 552, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = PyObject_RichCompare(__pyx_v_n, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 552, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_11 < 0))) __PYX_ERR(0, 552, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_11) { + + /* "fontTools/cu2qu/cu2qu.py":553 + * if spline is None: + * if n == MAX_N: + * break # <<<<<<<<<<<<<< + * n += 1 + * last_i = i +*/ + goto __pyx_L18_break; + + /* "fontTools/cu2qu/cu2qu.py":552 + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + * if spline is None: + * if n == MAX_N: # <<<<<<<<<<<<<< + * break + * n += 1 +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":554 + * if n == MAX_N: + * break + * n += 1 # <<<<<<<<<<<<<< + * last_i = i + * continue +*/ + __pyx_t_1 = __Pyx_PyLong_AddObjC(__pyx_v_n, __pyx_mstate_global->__pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 554, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF_SET(__pyx_v_n, __pyx_t_1); + __pyx_t_1 = 0; + + /* "fontTools/cu2qu/cu2qu.py":555 + * break + * n += 1 + * last_i = i # <<<<<<<<<<<<<< + * continue + * splines[i] = spline +*/ + __pyx_v_last_i = __pyx_v_i; + + /* "fontTools/cu2qu/cu2qu.py":556 + * n += 1 + * last_i = i + * continue # <<<<<<<<<<<<<< + * splines[i] = spline + * i = (i + 1) % l +*/ + goto __pyx_L17_continue; + + /* "fontTools/cu2qu/cu2qu.py":551 + * while True: + * spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + * if spline is None: # <<<<<<<<<<<<<< + * if n == MAX_N: + * break +*/ + } + + /* "fontTools/cu2qu/cu2qu.py":557 + * last_i = i + * continue + * splines[i] = spline # <<<<<<<<<<<<<< + * i = (i + 1) % l + * if i == last_i: +*/ + if (unlikely((__Pyx_SetItemInt(__pyx_v_splines, __pyx_v_i, __pyx_v_spline, int, 1, __Pyx_PyLong_From_int, 1, 1, 1, 1, __Pyx_ReferenceSharing_OwnStrongReference) < 0))) __PYX_ERR(0, 557, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":558 + * continue + * splines[i] = spline + * i = (i + 1) % l # <<<<<<<<<<<<<< + * if i == last_i: + * # done. go home +*/ + __pyx_t_14 = (__pyx_v_i + 1); + if (unlikely(__pyx_v_l == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(0, 558, __pyx_L1_error) + } + __pyx_v_i = __Pyx_mod_long(__pyx_t_14, __pyx_v_l, 0); + + /* "fontTools/cu2qu/cu2qu.py":559 + * splines[i] = spline + * i = (i + 1) % l + * if i == last_i: # <<<<<<<<<<<<<< + * # done. go home + * return [[(s.real, s.imag) for s in spline] for spline in splines] +*/ + __pyx_t_11 = (__pyx_v_i == __pyx_v_last_i); + if (__pyx_t_11) { + + /* "fontTools/cu2qu/cu2qu.py":561 + * if i == last_i: + * # done. 
go home + * return [[(s.real, s.imag) for s in spline] for spline in splines] # <<<<<<<<<<<<<< + * + * raise ApproxNotFoundError(curves) +*/ + __Pyx_XDECREF(__pyx_r); + { /* enter inner scope */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 561, __pyx_L24_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_v_splines; __Pyx_INCREF(__pyx_t_2); + __pyx_t_7 = 0; + for (;;) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 561, __pyx_L24_error) + #endif + if (__pyx_t_7 >= __pyx_temp) break; + } + __pyx_t_5 = __Pyx_PyList_GetItemRefFast(__pyx_t_2, __pyx_t_7, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_7; + if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 561, __pyx_L24_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_XDECREF_SET(__pyx_8genexpr4__pyx_v_spline, __pyx_t_5); + __pyx_t_5 = 0; + { /* enter inner scope */ + __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_GOTREF(__pyx_t_5); + if (likely(PyList_CheckExact(__pyx_8genexpr4__pyx_v_spline)) || PyTuple_CheckExact(__pyx_8genexpr4__pyx_v_spline)) { + __pyx_t_6 = __pyx_8genexpr4__pyx_v_spline; __Pyx_INCREF(__pyx_t_6); + __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_8genexpr4__pyx_v_spline); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 561, __pyx_L29_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_6))) { + { + Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_6); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 561, __pyx_L29_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + __pyx_t_10 = __Pyx_PyList_GetItemRefFast(__pyx_t_6, __pyx_t_3, __Pyx_ReferenceSharing_OwnStrongReference); + ++__pyx_t_3; + } else { + { + Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_6); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 561, __pyx_L29_error) + #endif + if (__pyx_t_3 >= __pyx_temp) break; + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_10 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_3)); + #else + __pyx_t_10 = __Pyx_PySequence_ITEM(__pyx_t_6, __pyx_t_3); + #endif + ++__pyx_t_3; + } + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 561, __pyx_L29_error) + } else { + __pyx_t_10 = __pyx_t_4(__pyx_t_6); + if (unlikely(!__pyx_t_10)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 561, __pyx_L29_error) + PyErr_Clear(); + } + break; + } + } + __Pyx_GOTREF(__pyx_t_10); + __Pyx_XDECREF_SET(__pyx_8genexpr5__pyx_v_s, __pyx_t_10); + __pyx_t_10 = 0; + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_8genexpr5__pyx_v_s, __pyx_mstate_global->__pyx_n_u_real); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_8genexpr5__pyx_v_s, __pyx_mstate_global->__pyx_n_u_imag); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_GIVEREF(__pyx_t_10); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_15, 0, 
__pyx_t_10) != (0)) __PYX_ERR(0, 561, __pyx_L29_error); + __Pyx_GIVEREF(__pyx_t_9); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_15, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 561, __pyx_L29_error); + __pyx_t_10 = 0; + __pyx_t_9 = 0; + if (unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_15))) __PYX_ERR(0, 561, __pyx_L29_error) + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_s); __pyx_8genexpr5__pyx_v_s = 0; + goto __pyx_L33_exit_scope; + __pyx_L29_error:; + __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_s); __pyx_8genexpr5__pyx_v_s = 0; + goto __pyx_L24_error; + __pyx_L33_exit_scope:; + } /* exit inner scope */ + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 561, __pyx_L24_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_8genexpr4__pyx_v_spline); __pyx_8genexpr4__pyx_v_spline = 0; + goto __pyx_L35_exit_scope; + __pyx_L24_error:; + __Pyx_XDECREF(__pyx_8genexpr4__pyx_v_spline); __pyx_8genexpr4__pyx_v_spline = 0; + goto __pyx_L1_error; + __pyx_L35_exit_scope:; + } /* exit inner scope */ + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "fontTools/cu2qu/cu2qu.py":559 + * splines[i] = spline + * i = (i + 1) % l + * if i == last_i: # <<<<<<<<<<<<<< + * # done. go home + * return [[(s.real, s.imag) for s in spline] for spline in splines] +*/ + } + __pyx_L17_continue:; + } + __pyx_L18_break:; + + /* "fontTools/cu2qu/cu2qu.py":563 + * return [[(s.real, s.imag) for s in spline] for spline in splines] + * + * raise ApproxNotFoundError(curves) # <<<<<<<<<<<<<< +*/ + __pyx_t_2 = NULL; + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_ApproxNotFoundError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 563, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_16 = 1; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); + assert(__pyx_t_2); + PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(__pyx__function); + __Pyx_DECREF_SET(__pyx_t_5, __pyx__function); + __pyx_t_16 = 0; + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_curves}; + __pyx_t_1 = __Pyx_PyObject_FastCall((PyObject*)__pyx_t_5, __pyx_callargs+__pyx_t_16, (2-__pyx_t_16) | (__pyx_t_16*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 563, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + } + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(0, 563, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":503 + * + * + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) # <<<<<<<<<<<<<< + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): +*/ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_AddTraceback("fontTools.cu2qu.cu2qu.curves_to_quadratic", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_splines); + __Pyx_XDECREF(__pyx_v_n); + __Pyx_XDECREF(__pyx_v_spline); + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_curve); + __Pyx_XDECREF(__pyx_8genexpr3__pyx_v_p); 
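+ /*
+  * Reference, assembled from the "fontTools/cu2qu/cu2qu.py" source-mapping
+  * comments above (lines 545-563). This comment is a readability aid added by
+  * the editor, not part of the generated output; the loop compiled above is
+  * the C form of:
+  *
+  *     l = len(curves)
+  *     splines = [None] * l
+  *     last_i = i = 0
+  *     n = 1
+  *     while True:
+  *         spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
+  *         if spline is None:
+  *             if n == MAX_N:
+  *                 break
+  *             n += 1
+  *             last_i = i
+  *             continue
+  *         splines[i] = spline
+  *         i = (i + 1) % l
+  *         if i == last_i:
+  *             # done. go home
+  *             return [[(s.real, s.imag) for s in spline] for spline in splines]
+  *     raise ApproxNotFoundError(curves)
+  *
+  * All curves share a single segment count n: whenever any curve fails to be
+  * approximated within its error budget, n is incremented and last_i records
+  * the failing index, so reaching i == last_i again after wrap-around means a
+  * full pass succeeded at the current n (capped at MAX_N, which the module
+  * init code below sets to 100).
+  */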
+ __Pyx_XDECREF(__pyx_8genexpr4__pyx_v_spline); + __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_s); + __Pyx_XDECREF(__pyx_v_curves); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +/* #### Code section: module_exttypes ### */ + +static PyObject *__pyx_tp_new_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + PyObject *o; + #if CYTHON_USE_FREELISTS + if (likely((int)(__pyx_mstate_global->__pyx_freecount_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen > 0) & __PYX_CHECK_FINAL_TYPE_FOR_FREELISTS(t, __pyx_mstate_global->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen, sizeof(struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen)))) + { + o = (PyObject*)__pyx_mstate_global->__pyx_freelist_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen[--__pyx_mstate_global->__pyx_freecount_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen]; + #if CYTHON_USE_TYPE_SPECS + Py_DECREF(Py_TYPE(o)); + #endif + memset(o, 0, sizeof(struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen)); + #if CYTHON_COMPILING_IN_LIMITED_API + (void) PyObject_Init(o, t); + #else + (void) PyObject_INIT(o, t); + #endif + } else + #endif + { + o = __Pyx_AllocateExtensionType(t, 1); + if (unlikely(!o)) return 0; + } + return o; +} + +static void __pyx_tp_dealloc_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen(PyObject *o) { + #if CYTHON_USE_TP_FINALIZE + if (unlikely(__Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && (!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))) { + if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + } + #endif + #if CYTHON_USE_FREELISTS + if (likely((int)(__pyx_mstate_global->__pyx_freecount_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen < 8) & __PYX_CHECK_FINAL_TYPE_FOR_FREELISTS(Py_TYPE(o), __pyx_mstate_global->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen, sizeof(struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen)))) + { + __pyx_mstate_global->__pyx_freelist_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen[__pyx_mstate_global->__pyx_freecount_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen++] = ((struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen *)o); + } else + #endif + { + PyTypeObject *tp = Py_TYPE(o); + #if CYTHON_USE_TYPE_SLOTS + (*tp->tp_free)(o); + #else + { + freefunc tp_free = (freefunc)PyType_GetSlot(tp, Py_tp_free); + if (tp_free) tp_free(o); + } + #endif + #if CYTHON_USE_TYPE_SPECS + Py_DECREF(tp); + #endif + } +} +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen_slots[] = { + {Py_tp_dealloc, (void *)__pyx_tp_dealloc_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen}, + {Py_tp_new, (void *)__pyx_tp_new_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen}, + {0, 0}, +}; +static PyType_Spec __pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen_spec = { + "fontTools.cu2qu.cu2qu.__pyx_scope_struct___split_cubic_into_n_gen", + sizeof(struct 
__pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen), + 0, + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, + __pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen_slots, +}; +#else + +static PyTypeObject __pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen = { + PyVarObject_HEAD_INIT(0, 0) + "fontTools.cu2qu.cu2qu.""__pyx_scope_struct___split_cubic_into_n_gen", /*tp_name*/ + sizeof(struct __pyx_obj_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen, /*tp_dealloc*/ + 0, /*tp_vectorcall_offset*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_as_async*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + #if !CYTHON_USE_TYPE_SPECS + 0, /*tp_dictoffset*/ + #endif + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if CYTHON_USE_TP_FINALIZE + 0, /*tp_finalize*/ + #else + NULL, /*tp_finalize*/ + #endif + #if !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800 + 0, /*tp_vectorcall*/ + #endif + #if __PYX_NEED_TP_PRINT_SLOT == 1 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ + #endif + #if PY_VERSION_HEX >= 0x030d00A4 + 0, /*tp_versions_used*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ + #endif +}; +#endif + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; +/* #### Code section: initfunc_declarations ### */ +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/ +static CYTHON_SMALL_CODE int 
__Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate); /*proto*/ +/* #### Code section: init_module ### */ + +static int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + #if CYTHON_USE_TYPE_SPECS + __pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen_spec, NULL); if (unlikely(!__pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen)) __PYX_ERR(0, 150, __pyx_L1_error) + if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen_spec, __pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen) < (0)) __PYX_ERR(0, 150, __pyx_L1_error) + #else + __pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen = &__pyx_type_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + #endif + #if !CYTHON_USE_TYPE_SPECS + if (__Pyx_PyType_Ready(__pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen) < (0)) __PYX_ERR(0, 150, __pyx_L1_error) + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000 + PyUnstable_Object_EnableDeferredRefcount((PyObject*)__pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen); + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen->tp_dictoffset && __pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen->tp_getattro == PyObject_GenericGetAttr)) { + __pyx_mstate->__pyx_ptype_9fontTools_5cu2qu_5cu2qu___pyx_scope_struct___split_cubic_into_n_gen->tp_getattro = PyObject_GenericGetAttr; + } + #endif + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + 
__Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_cu2qu(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_cu2qu}, + #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + {Py_mod_gil, Py_MOD_GIL_USED}, + #endif + #if PY_VERSION_HEX >= 0x030C0000 && CYTHON_USE_MODULE_STATE + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, + #endif + {0, NULL} +}; +#endif + +#ifdef __cplusplus +namespace { + struct PyModuleDef __pyx_moduledef = + #else + static struct PyModuleDef __pyx_moduledef = + #endif + { + PyModuleDef_HEAD_INIT, + "cu2qu", + 0, /* m_doc */ + #if CYTHON_USE_MODULE_STATE + sizeof(__pyx_mstatetype), /* m_size */ + #else + (CYTHON_PEP489_MULTI_PHASE_INIT) ? 0 : -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + #if CYTHON_USE_MODULE_STATE + __pyx_m_traverse, /* m_traverse */ + __pyx_m_clear, /* m_clear */ + NULL /* m_free */ + #else + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ + #endif + }; + #ifdef __cplusplus +} /* anonymous namespace */ +#endif + +/* PyModInitFuncType */ +#ifndef CYTHON_NO_PYINIT_EXPORT + #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#else + #ifdef __cplusplus + #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * + #else + #define __Pyx_PyMODINIT_FUNC PyObject * + #endif +#endif + +__Pyx_PyMODINIT_FUNC PyInit_cu2qu(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_cu2qu(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +/* ModuleCreationPEP489 */ +#if CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\ + || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000)) +static PY_INT64_T __Pyx_GetCurrentInterpreterId(void) { + { + PyObject *module = PyImport_ImportModule("_interpreters"); // 3.13+ I think + if (!module) { + PyErr_Clear(); // just try the 3.8-3.12 version + module = PyImport_ImportModule("_xxsubinterpreters"); + if (!module) goto bad; + } + PyObject *current = PyObject_CallMethod(module, "get_current", NULL); + Py_DECREF(module); + if (!current) goto bad; + if (PyTuple_Check(current)) { + PyObject *new_current = PySequence_GetItem(current, 0); + Py_DECREF(current); + current = new_current; + if (!new_current) goto bad; + } + long long as_c_int = PyLong_AsLongLong(current); + Py_DECREF(current); + return as_c_int; + } + bad: + PySys_WriteStderr("__Pyx_GetCurrentInterpreterId failed. 
Try setting the C define CYTHON_PEP489_MULTI_PHASE_INIT=0\n"); + return -1; +} +#endif +#if !CYTHON_USE_MODULE_STATE +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + static PY_INT64_T main_interpreter_id = -1; +#if CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000 + PY_INT64_T current_id = GraalPyInterpreterState_GetIDFromThreadState(PyThreadState_Get()); +#elif CYTHON_COMPILING_IN_GRAAL + PY_INT64_T current_id = PyInterpreterState_GetIDFromThreadState(PyThreadState_Get()); +#elif CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\ + || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000)) + PY_INT64_T current_id = __Pyx_GetCurrentInterpreterId(); +#elif CYTHON_COMPILING_IN_LIMITED_API + PY_INT64_T current_id = PyInterpreterState_GetID(PyInterpreterState_Get()); +#else + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); +#endif + if (unlikely(current_id == -1)) { + return -1; + } + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return 0; + } else if (unlikely(main_interpreter_id != current_id)) { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +#endif +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) +{ + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + CYTHON_UNUSED_VAR(def); + #if !CYTHON_USE_MODULE_STATE + if (__Pyx_check_single_interpreter()) + return NULL; + #endif + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_cu2qu(PyObject *__pyx_pyinit_module) +#endif +{ + int stringtab_initialized = 0; + #if CYTHON_USE_MODULE_STATE + int pystate_addmodule_run = 0; + #endif + __pyx_mstatetype *__pyx_mstate = NULL; + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + double __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m 
== __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'cu2qu' has already been imported. Re-initialisation is not supported."); + return -1; + } + #else + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_t_1 = __pyx_pyinit_module; + Py_INCREF(__pyx_t_1); + #else + __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #if CYTHON_USE_MODULE_STATE + { + int add_module_result = __Pyx_State_AddModule(__pyx_t_1, &__pyx_moduledef); + __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to "cu2qu" pseudovariable */ + if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + pystate_addmodule_run = 1; + } + #else + __pyx_m = __pyx_t_1; + #endif + #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + PyUnstable_Module_SetGIL(__pyx_m, Py_MOD_GIL_USED); + #endif + __pyx_mstate = __pyx_mstate_global; + CYTHON_UNUSED_VAR(__pyx_t_1); + __pyx_mstate->__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_mstate->__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_mstate->__pyx_d); + __pyx_mstate->__pyx_b = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_mstate->__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_mstate->__pyx_cython_runtime = __Pyx_PyImport_AddModuleRef("cython_runtime"); if (unlikely(!__pyx_mstate->__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_mstate->__pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /* ImportRefnannyAPI */ + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + +__Pyx_RefNannySetupContext("PyInit_cu2qu", 0); + __Pyx_init_runtime_version(); + if (__Pyx_check_binary_version(__PYX_LIMITED_VERSION_HEX, __Pyx_get_runtime_version(), CYTHON_COMPILING_IN_LIMITED_API) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_mstate->__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_mstate->__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_mstate->__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_mstate->__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Library function declarations ---*/ + /*--- Initialize various global constants etc. 
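(handled by __Pyx_InitConstants below: it decodes the module's interned string table, which on this build is stored bz2-compressed when CYTHON_COMPRESS_STRINGS == 2 and decompressed once at import, and builds the small-integer table; parts of both are marked immortal under CYTHON_IMMORTAL_CONSTANTS)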
---*/ + if (__Pyx_InitConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + stringtab_initialized = 1; + if (__Pyx_InitGlobals() < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + if (__pyx_module_is_main_fontTools__cu2qu__cu2qu) { + if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_name, __pyx_mstate_global->__pyx_n_u_main) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "fontTools.cu2qu.cu2qu")) { + if (unlikely((PyDict_SetItemString(modules, "fontTools.cu2qu.cu2qu", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + if (__Pyx_CreateCodeObjects(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(__pyx_mstate); + (void)__Pyx_modinit_variable_export_code(__pyx_mstate); + (void)__Pyx_modinit_function_export_code(__pyx_mstate); + if (unlikely((__Pyx_modinit_type_init_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_type_import_code(__pyx_mstate); + (void)__Pyx_modinit_variable_import_code(__pyx_mstate); + (void)__Pyx_modinit_function_import_code(__pyx_mstate); + /*--- Execution code ---*/ + + /* "fontTools/cu2qu/cu2qu.py":18 + * # limitations under the License. + * + * try: # <<<<<<<<<<<<<< + * import cython + * except (AttributeError, ImportError): +*/ + { + (void)__pyx_t_1; (void)__pyx_t_2; (void)__pyx_t_3; /* mark used */ + /*try:*/ { + + /* "fontTools/cu2qu/cu2qu.py":19 + * + * try: + * import cython # <<<<<<<<<<<<<< + * except (AttributeError, ImportError): + * # if cython not installed, use mock module with no-op decorators and types +*/ + } + } + + /* "fontTools/cu2qu/cu2qu.py":23 + * # if cython not installed, use mock module with no-op decorators and types + * from fontTools.misc import cython + * COMPILED = cython.compiled # <<<<<<<<<<<<<< + * + * import math +*/ + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_COMPILED, Py_True) < (0)) __PYX_ERR(0, 23, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":25 + * COMPILED = cython.compiled + * + * import math # <<<<<<<<<<<<<< + * + * from .errors import Error as Cu2QuError, ApproxNotFoundError +*/ + __pyx_t_3 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_math, 0, 0, NULL, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) + __pyx_t_4 = __pyx_t_3; + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_math, __pyx_t_4) < (0)) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":27 + * import math + * + * from .errors import Error as Cu2QuError, ApproxNotFoundError # <<<<<<<<<<<<<< + * + * +*/ + { + PyObject* const __pyx_imported_names[] = {__pyx_mstate_global->__pyx_n_u_Error,__pyx_mstate_global->__pyx_n_u_ApproxNotFoundError}; + __pyx_t_3 = __Pyx_Import(__pyx_mstate_global->__pyx_n_u_errors, __pyx_imported_names, 2, __pyx_mstate_global->__pyx_kp_u_fontTools_cu2qu_errors, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 27, __pyx_L1_error) + } + __pyx_t_4 = __pyx_t_3; + __Pyx_GOTREF(__pyx_t_4); + { + PyObject* const __pyx_imported_names[] = 
{__pyx_mstate_global->__pyx_n_u_Error,__pyx_mstate_global->__pyx_n_u_ApproxNotFoundError}; + for (__pyx_t_5=0; __pyx_t_5 < 2; __pyx_t_5++) { + __pyx_t_6 = __Pyx_ImportFrom(__pyx_t_4, __pyx_imported_names[__pyx_t_5]); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + switch (__pyx_t_5) { + case 0: + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_Cu2QuError, __pyx_t_6) < (0)) __PYX_ERR(0, 27, __pyx_L1_error) + break; + default:; + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_imported_names[__pyx_t_5], __pyx_t_6) < (0)) __PYX_ERR(0, 27, __pyx_L1_error) + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":30 + * + * + * __all__ = ["curve_to_quadratic", "curves_to_quadratic"] # <<<<<<<<<<<<<< + * + * MAX_N = 100 +*/ + __pyx_t_4 = __Pyx_PyList_Pack(2, __pyx_mstate_global->__pyx_n_u_curve_to_quadratic, __pyx_mstate_global->__pyx_n_u_curves_to_quadratic); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_all, __pyx_t_4) < (0)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":32 + * __all__ = ["curve_to_quadratic", "curves_to_quadratic"] + * + * MAX_N = 100 # <<<<<<<<<<<<<< + * + * NAN = float("NaN") +*/ + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_MAX_N, __pyx_mstate_global->__pyx_int_100) < (0)) __PYX_ERR(0, 32, __pyx_L1_error) + + /* "fontTools/cu2qu/cu2qu.py":34 + * MAX_N = 100 + * + * NAN = float("NaN") # <<<<<<<<<<<<<< + * + * +*/ + __pyx_t_7 = __Pyx_PyUnicode_AsDouble(__pyx_mstate_global->__pyx_n_u_NaN); if (unlikely(__PYX_CHECK_FLOAT_EXCEPTION(__pyx_t_7, ((double)((double)-1))) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L1_error) + __pyx_t_4 = PyFloat_FromDouble(__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_NAN, __pyx_t_4) < (0)) __PYX_ERR(0, 34, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":150 + * + * + * @cython.locals( # <<<<<<<<<<<<<< + * p0=cython.complex, + * p1=cython.complex, +*/ + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_5cu2qu_5cu2qu_1_split_cubic_into_n_gen, 0, __pyx_mstate_global->__pyx_n_u_split_cubic_into_n_gen, NULL, __pyx_mstate_global->__pyx_n_u_fontTools_cu2qu_cu2qu, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000 + PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4); + #endif + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_split_cubic_into_n_gen, __pyx_t_4) < (0)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":471 + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) + * def curve_to_quadratic(curve, max_err, all_quadratic=True): # <<<<<<<<<<<<<< + * """Approximate a cubic Bezier curve with a spline of n quadratics. 
+ * +*/ + __pyx_t_4 = __Pyx_PyBool_FromLong(((int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 471, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + + /* "fontTools/cu2qu/cu2qu.py":468 + * + * + * @cython.locals(max_err=cython.double) # <<<<<<<<<<<<<< + * @cython.locals(n=cython.int) + * @cython.locals(all_quadratic=cython.int) +*/ + __pyx_t_6 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 468, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_5cu2qu_5cu2qu_4curve_to_quadratic, 0, __pyx_mstate_global->__pyx_n_u_curve_to_quadratic, NULL, __pyx_mstate_global->__pyx_n_u_fontTools_cu2qu_cu2qu, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[1])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000 + PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4); + #endif + __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_curve_to_quadratic, __pyx_t_4) < (0)) __PYX_ERR(0, 468, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":505 + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): # <<<<<<<<<<<<<< + * """Return quadratic Bezier splines approximating the input cubic Beziers. + * +*/ + __pyx_t_4 = __Pyx_PyBool_FromLong(((int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 505, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + + /* "fontTools/cu2qu/cu2qu.py":503 + * + * + * @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) # <<<<<<<<<<<<<< + * @cython.locals(all_quadratic=cython.int) + * def curves_to_quadratic(curves, max_errors, all_quadratic=True): +*/ + __pyx_t_6 = PyTuple_Pack(1, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 503, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_9fontTools_5cu2qu_5cu2qu_6curves_to_quadratic, 0, __pyx_mstate_global->__pyx_n_u_curves_to_quadratic, NULL, __pyx_mstate_global->__pyx_n_u_fontTools_cu2qu_cu2qu, __pyx_mstate_global->__pyx_d, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[2])); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 503, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000 + PyUnstable_Object_EnableDeferredRefcount(__pyx_t_4); + #endif + __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_curves_to_quadratic, __pyx_t_4) < (0)) __PYX_ERR(0, 503, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "fontTools/cu2qu/cu2qu.py":1 + * # cython: language_level=3 # <<<<<<<<<<<<<< + * # distutils: define_macros=CYTHON_TRACE_NOGIL=1 + * +*/ + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_curves_to_quadratic_line_503, __pyx_mstate_global->__pyx_kp_u_Return_quadratic_Bezier_splines) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_test, 
__pyx_t_4) < (0)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + if (__pyx_m) { + if (__pyx_mstate->__pyx_d && stringtab_initialized) { + __Pyx_AddTraceback("init fontTools.cu2qu.cu2qu", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + #if !CYTHON_USE_MODULE_STATE + Py_CLEAR(__pyx_m); + #else + Py_DECREF(__pyx_m); + if (pystate_addmodule_run) { + PyObject *tp, *value, *tb; + PyErr_Fetch(&tp, &value, &tb); + PyState_RemoveModule(&__pyx_moduledef); + PyErr_Restore(tp, value, tb); + } + #endif + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init fontTools.cu2qu.cu2qu"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #else + return __pyx_m; + #endif +} +/* #### Code section: pystring_table ### */ +/* #### Code section: cached_builtins ### */ + +static int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate) { + CYTHON_UNUSED_VAR(__pyx_mstate); + + /* Cached unbound methods */ + __pyx_mstate->__pyx_umethod_PyDict_Type_items.type = (PyObject*)&PyDict_Type; + __pyx_mstate->__pyx_umethod_PyDict_Type_items.method_name = &__pyx_mstate->__pyx_n_u_items; + __pyx_mstate->__pyx_umethod_PyDict_Type_pop.type = (PyObject*)&PyDict_Type; + __pyx_mstate->__pyx_umethod_PyDict_Type_pop.method_name = &__pyx_mstate->__pyx_n_u_pop; + __pyx_mstate->__pyx_umethod_PyDict_Type_values.type = (PyObject*)&PyDict_Type; + __pyx_mstate->__pyx_umethod_PyDict_Type_values.method_name = &__pyx_mstate->__pyx_n_u_values; + return 0; +} +/* #### Code section: cached_constants ### */ + +static int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate) { + __Pyx_RefNannyDeclarations + CYTHON_UNUSED_VAR(__pyx_mstate); + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + __Pyx_RefNannyFinishContext(); + return 0; +} +/* #### Code section: init_constants ### */ + +static int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate) { + CYTHON_UNUSED_VAR(__pyx_mstate); + { + const struct { const unsigned int length: 11; } index[] = {{1},{28},{1566},{1},{30},{7},{6},{22},{2},{9},{19},{8},{10},{5},{5},{3},{3},{20},{1},{2},{7},{13},{18},{1},{2},{1},{2},{18},{5},{5},{18},{6},{19},{1},{2},{7},{7},{2},{6},{21},{8},{1},{4},{13},{5},{5},{1},{6},{8},{4},{7},{10},{10},{1},{8},{4},{1},{2},{2},{2},{2},{3},{12},{4},{1},{4},{12},{10},{6},{7},{23},{2},{4},{8},{5},{5},{6},{97},{211},{2}}; + #if (CYTHON_COMPRESS_STRINGS) == 2 /* compression: bz2 (1382 bytes) */ +const char* const cstring = "BZh91AY&SY\005#\031&\000\0012\377\377\357_\356\002~\377\377\363\277\267\377\312\377\357\377\364@@@@@@@@@\000@@@\000@\000P\004\335\307=\272\353\305\252\2734y\007\23454Hj\031\224\3231\021\372\232\021\221\223#\324\03140\2324\000\000\000a\001\246\231\036\240\320\204\301\032\tO\3224#i=O\n\014 \006@h\000\000\031\032\003& \320\tM\021\032&\204\312~\2044\323T\365?\022\201\221\265\000\321\3522= \r\001\240\001\223M\000=@\323SU7\252z\236\223\323\324\206@\000\003@h\0004\001\220\000\000\001\243G\250\365\000%5S(f\221\352\003L\324\006\232\017P\321\240\032\003C@\000\000\000\000\000\026l\225N@\031\276\264\242\036\352\026\030\277\007\350\005*\002$\001\257;\371\250\230\270\357\001\0310\200\342\364\237\376;\357\302\307\177\257\365\367^\311\226sR\220?\353\341\267\357\373ro\033\022/n\001l\211n\207\210\226a\377\331\034>J\3271\307~\234\346\226\001\204\3145\347\004\031z 
b\021\341\342\354(4V\215]\236\361\354\366\243\367\354\317\300Z\326\246i-\273\013\232\224\313'\214y\n\024+\264\360HC\000\3338\010L|\030\324Lk\300\026~\034P\214\341\006\207\344\000\205(\204a-t\303z\330\010E\210\265\217\201=\223\024*\026\002\356c\034Q\225\013\300.\177z\362&\261\024\353\177\205MYBx\000+\307\003\350kW\271\270\216\261\242\340\034V\207R\234\230g\312\276U\357\026>}u\255\\\371t\377\332\322Z\322N\336\246[\251\227}\223\275\031\335\275X\\N\226\2227\351\335\264\221\366\263\255\214L\226K\037K\337\217\332\243\267\343\247\343\263\363\205\311\362\215d3\3751+]\254\336N\353i7\035d\335l0\332\035\337\031\223\213\305\225\217+\337\3027y\267pQ}\360\351\267\205kK7\223nB\222\223\264\227]\317\326\263x\364\353x\335\250Y\274\2214\340\340\337\254\221y\243\2225|=q\223\366d\361&\360\217\323\235\364h\262\270\226\354\246\245\t\034\201\300R\341\312\237cw\274\375\376\372\373\326y\367\334\377p\327\272\364]\332\235\254\336Jdz;mO\276.MVK \271zGs\327&\313\267\222\243t\t\366\313k\311\216\271\263\225\022-WO\376H\0274\024Ws\322\223\214d'#\177\374\303\370\364\275:o}\350\316B\372\017dB\021\374"; + PyObject *data = __Pyx_DecompressString(cstring, 1265, 1); + if (unlikely(!data)) __PYX_ERR(0, 1, __pyx_L1_error) + const char* const bytes = __Pyx_PyBytes_AsString(data); + #if !CYTHON_ASSUME_SAFE_MACROS + if (likely(bytes)); else { Py_DECREF(data); __PYX_ERR(0, 1, __pyx_L1_error) } + #endif + #else /* compression: none (2435 bytes) */ +const char* const bytes = ".Lib/fontTools/cu2qu/cu2qu.pyReturn quadratic Bezier splines approximating the input cubic Beziers.\n\n Args:\n curves: A sequence of *n* curves, each curve being a sequence of four\n 2D tuples.\n max_errors: A sequence of *n* floats representing the maximum permissible\n deviation from each of the cubic Bezier curves.\n all_quadratic (bool): If True (default) returned values are a\n quadratic spline. If False, they are either a single quadratic\n curve or a single cubic curve.\n\n Example::\n\n >>> curves_to_quadratic( [\n ... [ (50,50), (100,100), (150,100), (200,50) ],\n ... [ (75,50), (120,100), (150,75), (200,60) ]\n ... ], [1,1] )\n [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]\n\n The returned splines have \"implied oncurve points\" suitable for use in\n TrueType ``glif`` outlines - i.e. 
in the first spline returned above,\n the first quadratic segment runs from (50,50) to\n ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).\n\n Returns:\n If all_quadratic is True, a list of splines, each spline being a list\n of 2D tuples.\n\n If all_quadratic is False, a list of curves, each curve being a quadratic\n (length 3), or cubic (length 4).\n\n Raises:\n fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation\n can be found for all curves with the given parameters.\n ?curves_to_quadratic (line 503)disableenablefontTools.cu2qu.errorsgcisenabledApproxNotFoundErrorCOMPILEDCu2QuErrorErrorMAX_NNANNaN__Pyx_PyDict_NextRefaa1__all__all_quadraticasyncio.coroutinesbb1cc1cline_in_tracebackclosecurvecurve_to_quadraticcurvescurves_to_quadraticdd1delta_2delta_3dterrorsfontTools.cu2qu.cu2qu__func__iimag_is_coroutineisnanitemsllast_i__main__math""max_errmax_errors__module__n__name__nextpp0p1p2p3pop__qualname__realssend__set_name__setdefaultsplinesplines_split_cubic_into_n_gent1t1_2__test__throwvaluevalues\200\001\360\006\000()\360*\000\005\r\210A\210W\220B\220c\230\024\230U\240!\340\004\010\210\005\210U\220!\2203\220f\230B\230a\330\010\021\320\021$\240A\240W\250C\250y\270\001\330\010\013\2107\220'\230\021\340\014\023\2202\220Q\220g\230Q\230g\240T\250\025\250a\340\004\n\320\n\035\230Q\230a\200\001\340,-\360J\001\000\005\016\210Q\210a\210w\220b\230\003\2304\230u\240G\2504\250y\270\001\330\004\013\2103\210a\210|\2303\230c\240\021\240!\340\004\010\210\003\2101\210A\330\004\016\210a\210v\220R\220q\330\004\r\210T\220\021\330\004\010\210\001\330\004\005\330\010\021\320\021$\240A\240V\2501\250D\260\003\260:\270Q\270d\300!\330\010\013\2107\220#\220Q\330\014\017\210r\220\023\220A\330\020\021\330\014\021\220\021\330\014\025\220Q\330\014\r\330\010\017\210q\220\005\220Q\330\010\r\210R\210r\220\023\220B\220a\330\010\013\2102\210S\220\001\340\014\023\2201\220B\220a\220w\230a\230w\240d\250%\250x\260t\270:\300Q\340\004\n\320\n\035\230Q\230a\200\001"; + PyObject *data = NULL; + CYTHON_UNUSED_VAR(__Pyx_DecompressString); + #endif + PyObject **stringtab = __pyx_mstate->__pyx_string_tab; + Py_ssize_t pos = 0; + for (int i = 0; i < 77; i++) { + Py_ssize_t bytes_length = index[i].length; + PyObject *string = PyUnicode_DecodeUTF8(bytes + pos, bytes_length, NULL); + if (likely(string) && i >= 10) PyUnicode_InternInPlace(&string); + if (unlikely(!string)) { + Py_XDECREF(data); + __PYX_ERR(0, 1, __pyx_L1_error) + } + stringtab[i] = string; + pos += bytes_length; + } + for (int i = 77; i < 80; i++) { + Py_ssize_t bytes_length = index[i].length; + PyObject *string = PyBytes_FromStringAndSize(bytes + pos, bytes_length); + stringtab[i] = string; + pos += bytes_length; + if (unlikely(!string)) { + Py_XDECREF(data); + __PYX_ERR(0, 1, __pyx_L1_error) + } + } + Py_XDECREF(data); + for (Py_ssize_t i = 0; i < 80; i++) { + if (unlikely(PyObject_Hash(stringtab[i]) == -1)) { + __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #if CYTHON_IMMORTAL_CONSTANTS + { + PyObject **table = stringtab + 77; + for (Py_ssize_t i=0; i<3; ++i) { + #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL); + #else + Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT); + #endif + } + } + #endif + } + { + PyObject **numbertab = __pyx_mstate->__pyx_number_tab + 0; + int8_t const cint_constants_1[] = {1,2,3,4,6,100}; + for (int i = 0; i < 6; i++) { + numbertab[i] = PyLong_FromLong(cint_constants_1[i - 0]); + if (unlikely(!numbertab[i])) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #if 
CYTHON_IMMORTAL_CONSTANTS + { + PyObject **table = __pyx_mstate->__pyx_number_tab; + for (Py_ssize_t i=0; i<6; ++i) { + #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL); + #else + Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT); + #endif + } + } + #endif + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: init_codeobjects ### */ +typedef struct { + unsigned int argcount : 3; + unsigned int num_posonly_args : 1; + unsigned int num_kwonly_args : 1; + unsigned int nlocals : 5; + unsigned int flags : 10; + unsigned int first_line : 9; +} __Pyx_PyCode_New_function_description; +/* NewCodeObj.proto */ +static PyObject* __Pyx_PyCode_New( + const __Pyx_PyCode_New_function_description descr, + PyObject * const *varnames, + PyObject *filename, + PyObject *funcname, + PyObject *line_table, + PyObject *tuple_dedup_map +); + + +static int __Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate) { + PyObject* tuple_dedup_map = PyDict_New(); + if (unlikely(!tuple_dedup_map)) return -1; + { + const __Pyx_PyCode_New_function_description descr = {5, 0, 0, 19, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS|CO_GENERATOR), 150}; + PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_p0, __pyx_mstate->__pyx_n_u_p1, __pyx_mstate->__pyx_n_u_p2, __pyx_mstate->__pyx_n_u_p3, __pyx_mstate->__pyx_n_u_n, __pyx_mstate->__pyx_n_u_a1, __pyx_mstate->__pyx_n_u_b1, __pyx_mstate->__pyx_n_u_c1, __pyx_mstate->__pyx_n_u_d1, __pyx_mstate->__pyx_n_u_dt, __pyx_mstate->__pyx_n_u_delta_2, __pyx_mstate->__pyx_n_u_delta_3, __pyx_mstate->__pyx_n_u_i, __pyx_mstate->__pyx_n_u_a, __pyx_mstate->__pyx_n_u_b, __pyx_mstate->__pyx_n_u_c, __pyx_mstate->__pyx_n_u_d, __pyx_mstate->__pyx_n_u_t1, __pyx_mstate->__pyx_n_u_t1_2}; + __pyx_mstate_global->__pyx_codeobj_tab[0] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_Lib_fontTools_cu2qu_cu2qu_py, __pyx_mstate->__pyx_n_u_split_cubic_into_n_gen, __pyx_mstate->__pyx_kp_b_iso88591__3, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[0])) goto bad; + } + { + const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 7, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 468}; + PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_curve, __pyx_mstate->__pyx_n_u_max_err, __pyx_mstate->__pyx_n_u_all_quadratic, __pyx_mstate->__pyx_n_u_n, __pyx_mstate->__pyx_n_u_spline, __pyx_mstate->__pyx_n_u_p, __pyx_mstate->__pyx_n_u_s}; + __pyx_mstate_global->__pyx_codeobj_tab[1] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_Lib_fontTools_cu2qu_cu2qu_py, __pyx_mstate->__pyx_n_u_curve_to_quadratic, __pyx_mstate->__pyx_kp_b_iso88591_AWBc_U_U_3fBa_AWCy_7_2QgQgT_a_Q, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[1])) goto bad; + } + { + const __Pyx_PyCode_New_function_description descr = {3, 0, 0, 13, (unsigned int)(CO_OPTIMIZED|CO_NEWLOCALS), 503}; + PyObject* const varnames[] = {__pyx_mstate->__pyx_n_u_curves, __pyx_mstate->__pyx_n_u_max_errors, __pyx_mstate->__pyx_n_u_all_quadratic, __pyx_mstate->__pyx_n_u_l, __pyx_mstate->__pyx_n_u_last_i, __pyx_mstate->__pyx_n_u_i, __pyx_mstate->__pyx_n_u_splines, __pyx_mstate->__pyx_n_u_n, __pyx_mstate->__pyx_n_u_spline, __pyx_mstate->__pyx_n_u_curve, __pyx_mstate->__pyx_n_u_p, __pyx_mstate->__pyx_n_u_spline, __pyx_mstate->__pyx_n_u_s}; + __pyx_mstate_global->__pyx_codeobj_tab[2] = __Pyx_PyCode_New(descr, varnames, __pyx_mstate->__pyx_kp_u_Lib_fontTools_cu2qu_cu2qu_py, __pyx_mstate->__pyx_n_u_curves_to_quadratic, 
__pyx_mstate->__pyx_kp_b_iso88591_J_Qawb_4uG4y_3a_3c_1A_avRq_T_AV, tuple_dedup_map); if (unlikely(!__pyx_mstate_global->__pyx_codeobj_tab[2])) goto bad; + } + Py_DECREF(tuple_dedup_map); + return 0; + bad: + Py_DECREF(tuple_dedup_map); + return -1; +} +/* #### Code section: init_globals ### */ + +static int __Pyx_InitGlobals(void) { + /* PythonCompatibility.init */ + if (likely(__Pyx_init_co_variables() == 0)); else + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + /* AssertionsEnabled.init */ + if (likely(__Pyx_init_assertions_enabled() == 0)); else + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + /* CommonTypesMetaclass.init */ + if (likely(__pyx_CommonTypesMetaclass_init(__pyx_m) == 0)); else + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + /* CachedMethodType.init */ + #if CYTHON_COMPILING_IN_LIMITED_API + { + PyObject *typesModule=NULL; + typesModule = PyImport_ImportModule("types"); + if (typesModule) { + __pyx_mstate_global->__Pyx_CachedMethodType = PyObject_GetAttrString(typesModule, "MethodType"); + Py_DECREF(typesModule); + } + } // error handling follows + #endif + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + /* CythonFunctionShared.init */ + if (likely(__pyx_CyFunction_init(__pyx_m) == 0)); else + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + /* Generator.init */ + if (likely(__pyx_Generator_init(__pyx_m) == 0)); else + + if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: cleanup_globals ### */ +/* #### Code section: cleanup_module ### */ +/* #### Code section: main_method ### */ +/* #### Code section: utility_code_pragmas ### */ +#ifdef _MSC_VER +#pragma warning( push ) +/* Warning 4127: conditional expression is constant + * Cython uses constant conditional expressions to allow in inline functions to be optimized at + * compile-time, so this warning is not useful + */ +#pragma warning( disable : 4127 ) +#endif + + + +/* #### Code section: utility_code_def ### */ + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectCall (used by PyObjectFastCall) */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO (used by PyObjectFastCall) */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = __Pyx_CyOrPyCFunction_GET_FUNCTION(func); + self = __Pyx_CyOrPyCFunction_GET_SELF(func); + if 
(unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectFastCall */ +#if PY_VERSION_HEX < 0x03090000 || CYTHON_COMPILING_IN_LIMITED_API +static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs) { + PyObject *argstuple; + PyObject *result = 0; + size_t i; + argstuple = PyTuple_New((Py_ssize_t)nargs); + if (unlikely(!argstuple)) return NULL; + for (i = 0; i < nargs; i++) { + Py_INCREF(args[i]); + if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) != (0)) goto bad; + } + result = __Pyx_PyObject_Call(func, argstuple, kwargs); + bad: + Py_DECREF(argstuple); + return result; +} +#endif +#if CYTHON_VECTORCALL && !CYTHON_COMPILING_IN_LIMITED_API + #if PY_VERSION_HEX < 0x03090000 + #define __Pyx_PyVectorcall_Function(callable) _PyVectorcall_Function(callable) + #elif CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE vectorcallfunc __Pyx_PyVectorcall_Function(PyObject *callable) { + PyTypeObject *tp = Py_TYPE(callable); + #if defined(__Pyx_CyFunction_USED) + if (__Pyx_CyFunction_CheckExact(callable)) { + return __Pyx_CyFunction_func_vectorcall(callable); + } + #endif + if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) { + return NULL; + } + assert(PyCallable_Check(callable)); + Py_ssize_t offset = tp->tp_vectorcall_offset; + assert(offset > 0); + vectorcallfunc ptr; + memcpy(&ptr, (char *) callable + offset, sizeof(ptr)); + return ptr; +} + #else + #define __Pyx_PyVectorcall_Function(callable) PyVectorcall_Function(callable) + #endif +#endif +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject *const *args, size_t _nargs, PyObject *kwargs) { + Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); +#if CYTHON_COMPILING_IN_CPYTHON + if (nargs == 0 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_NOARGS)) + return __Pyx_PyObject_CallMethO(func, NULL); + } + else if (nargs == 1 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_O)) + return __Pyx_PyObject_CallMethO(func, args[0]); + } +#endif + if (kwargs == NULL) { + #if CYTHON_VECTORCALL + #if CYTHON_COMPILING_IN_LIMITED_API + return PyObject_Vectorcall(func, args, _nargs, NULL); + #else + vectorcallfunc f = __Pyx_PyVectorcall_Function(func); + if (f) { + return f(func, args, _nargs, NULL); + } + #endif + #endif + } + if (nargs == 0) { + return __Pyx_PyObject_Call(func, __pyx_mstate_global->__pyx_empty_tuple, kwargs); + } + #if PY_VERSION_HEX >= 0x03090000 && !CYTHON_COMPILING_IN_LIMITED_API + return PyObject_VectorcallDict(func, args, (size_t)nargs, kwargs); + #else + return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); + #endif +} + +/* PyLongCompare */ +static CYTHON_INLINE int __Pyx_PyLong_BoolEqObjC(PyObject *op1, PyObject *op2, long intval, long inplace) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_UNUSED_VAR(inplace); + if (op1 == op2) { + return 1; + } + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + int unequal; + unsigned long uintval; + Py_ssize_t size = __Pyx_PyLong_DigitCount(op1); + const digit* digits = __Pyx_PyLong_Digits(op1); + if (intval == 0) { + return 
(__Pyx_PyLong_IsZero(op1) == 1); + } else if (intval < 0) { + if (__Pyx_PyLong_IsNonNeg(op1)) + return 0; + intval = -intval; + } else { + if (__Pyx_PyLong_IsNeg(op1)) + return 0; + } + uintval = (unsigned long) intval; +#if PyLong_SHIFT * 4 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 4)) { + unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 3 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 3)) { + unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 2 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 2)) { + unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 1 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 1)) { + unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif + unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); + return (unequal == 0); + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = __Pyx_PyFloat_AS_DOUBLE(op1); + return ((double)a == (double)b); + } + return __Pyx_PyObject_IsTrueAndDecref( + PyObject_RichCompare(op1, op2, Py_EQ)); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* PyErrFetchRestore (used by IterFinish) */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *tmp_value; + assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); + if (value) { + #if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) + #endif + PyException_SetTraceback(value, tb); + } + tmp_value = tstate->current_exception; + tstate->current_exception = value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#endif +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject* exc_value; + exc_value = tstate->current_exception; + tstate->current_exception = 0; + *value = exc_value; + *type = NULL; + *tb = NULL; + if (exc_value) { + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + #if CYTHON_COMPILING_IN_CPYTHON + *tb = ((PyBaseExceptionObject*) exc_value)->traceback; + Py_XINCREF(*tb); + #else + *tb = PyException_GetTraceback(exc_value); + #endif + } +#else + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#endif +} +#endif + +/* IterFinish */ +static CYTHON_INLINE int __Pyx_IterFinish(void) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return -1; + __Pyx_PyErr_Clear(); + return 0; + } + return 0; +} + +/* UnpackItemEndCheck */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { + if (unlikely(retval)) { + Py_DECREF(retval); + __Pyx_RaiseTooManyValuesError(expected); + return -1; + } + return __Pyx_IterFinish(); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (unlikely(!j)) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck, int unsafe_shared) { + CYTHON_MAYBE_UNUSED_VAR(unsafe_shared); +#if CYTHON_ASSUME_SAFE_SIZE + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS)) { + return __Pyx_PyList_GetItemRefFast(o, wrapped_i, unsafe_shared); + } else + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + return __Pyx_NewRef(PyList_GET_ITEM(o, wrapped_i)); + } + return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i)); +#else + (void)wraparound; + (void)boundscheck; + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck, int unsafe_shared) { + 
CYTHON_MAYBE_UNUSED_VAR(unsafe_shared); +#if CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + return __Pyx_NewRef(PyTuple_GET_ITEM(o, wrapped_i)); + } + return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i)); +#else + (void)wraparound; + (void)boundscheck; + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + int wraparound, int boundscheck, int unsafe_shared) { + CYTHON_MAYBE_UNUSED_VAR(unsafe_shared); +#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS)) { + return __Pyx_PyList_GetItemRefFast(o, n, unsafe_shared); + } else if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + return __Pyx_NewRef(PyList_GET_ITEM(o, n)); + } + } else + #if !CYTHON_AVOID_BORROWED_REFS + if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + return __Pyx_NewRef(PyTuple_GET_ITEM(o, n)); + } + } else + #endif +#endif +#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY + { + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (!is_list && mm && mm->mp_subscript) { + PyObject *r, *key = PyLong_FromSsize_t(i); + if (unlikely(!key)) return NULL; + r = mm->mp_subscript(o, key); + Py_DECREF(key); + return r; + } + if (is_list || likely(sm && sm->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return sm->sq_item(o, i); + } + } +#else + if (is_list || !PyMapping_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + (void)wraparound; + (void)boundscheck; + return __Pyx_GetItemInt_Generic(o, PyLong_FromSsize_t(i)); +} + +/* PyErrExceptionMatches (used by PyObjectGetAttrStrNoError) */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + int result; + PyObject *exc_type; +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *current_exception = tstate->current_exception; + if (unlikely(!current_exception)) return 0; + exc_type = (PyObject*) Py_TYPE(current_exception); + if (exc_type == err) return 1; +#else + exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; +#endif + #if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(exc_type); + #endif + if (unlikely(PyTuple_Check(err))) { + result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + } else { + result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(exc_type); + #endif + return result; +} +#endif + +/* PyObjectGetAttrStr (used by PyObjectGetAttrStrNoError) */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return
tp->tp_getattro(obj, attr_name); + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* PyObjectGetAttrStrNoError (used by GetBuiltinName) */ +#if __PYX_LIMITED_VERSION_HEX < 0x030d0000 +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +#endif +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000 + (void) PyObject_GetOptionalAttr(obj, attr_name, &result); + return result; +#else +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +#endif +} + +/* GetBuiltinName (used by GetModuleGlobalName) */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_mstate_global->__pyx_b, name); + if (unlikely(!result) && !PyErr_Occurred()) { + PyErr_Format(PyExc_NameError, + "name '%U' is not defined", name); + } + return result; +} + +/* PyDictVersioning (used by GetModuleGlobalName) */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(!__pyx_m)) { + if (!PyErr_Occurred()) + PyErr_SetNone(PyExc_NameError); + return NULL; + } + result = PyObject_GetAttr(__pyx_m, name); + if (likely(result)) { + return result; + } + PyErr_Clear(); +#elif CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS + if (unlikely(__Pyx_PyDict_GetItemRef(__pyx_mstate_global->__pyx_d, name, &result) == -1)) PyErr_Clear(); + __PYX_UPDATE_DICT_CACHE(__pyx_mstate_global->__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return result; + } +#else + result = _PyDict_GetItem_KnownHash(__pyx_mstate_global->__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_mstate_global->__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* TupleAndListFromArray (used by fastcall) */ +#if !CYTHON_COMPILING_IN_CPYTHON && CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject * +__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + Py_ssize_t i; + if (n <= 0) { + return __Pyx_NewRef(__pyx_mstate_global->__pyx_empty_tuple); + } + res = PyTuple_New(n); + if (unlikely(res == NULL)) return NULL; + for (i = 0; i < n; i++) { + if (unlikely(__Pyx_PyTuple_SET_ITEM(res, i, src[i]) < (0))) { + Py_DECREF(res); + return NULL; + } + Py_INCREF(src[i]); + } + return res; +} +#elif CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { + PyObject *v; + Py_ssize_t i; + for (i = 0; i < length; i++) { + v = dest[i] = src[i]; + Py_INCREF(v); + } +} +static CYTHON_INLINE PyObject * +__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + return __Pyx_NewRef(__pyx_mstate_global->__pyx_empty_tuple); + } + res = PyTuple_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); + return res; +} +static CYTHON_INLINE PyObject * +__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + return PyList_New(0); + } + res = PyList_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); + return res; +} +#endif + +/* BytesEquals (used by UnicodeEquals) */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL ||\ + !(CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS) + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & 
PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals (used by fastcall) */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL + return PyObject_RichCompareBool(s1, s2, equals); +#else + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length, length2; + int kind; + void *data1, *data2; + #if !CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + #endif + length = __Pyx_PyUnicode_GET_LENGTH(s1); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely(length < 0)) return -1; + #endif + length2 = __Pyx_PyUnicode_GET_LENGTH(s2); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely(length2 < 0)) return -1; + #endif + if (length != length2) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + return (equals == Py_EQ); +return_ne: + return (equals == Py_NE); +#endif +} + +/* fastcall */ +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) +{ + Py_ssize_t i, n = __Pyx_PyTuple_GET_SIZE(kwnames); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely(n == -1)) return NULL; + #endif + for (i = 0; i < n; i++) + { + PyObject *namei = __Pyx_PyTuple_GET_ITEM(kwnames, i); + #if !CYTHON_ASSUME_SAFE_MACROS + if (unlikely(!namei)) return NULL; + #endif + if (s == namei) return kwvalues[i]; + } + for (i = 0; i < n; i++) + { + PyObject *namei = __Pyx_PyTuple_GET_ITEM(kwnames, i); + #if !CYTHON_ASSUME_SAFE_MACROS + if (unlikely(!namei)) return NULL; + #endif + int eq = __Pyx_PyUnicode_Equals(s, namei, Py_EQ); + if (unlikely(eq != 0)) { + if (unlikely(eq < 0)) return NULL; + return kwvalues[i]; + } + } + return NULL; +} +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 || CYTHON_COMPILING_IN_LIMITED_API +CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues) { + Py_ssize_t i, nkwargs; + PyObject *dict; +#if !CYTHON_ASSUME_SAFE_SIZE + nkwargs = PyTuple_Size(kwnames); + if (unlikely(nkwargs < 0)) return NULL; +#else + nkwargs = PyTuple_GET_SIZE(kwnames); +#endif + dict = PyDict_New(); + if (unlikely(!dict)) + return NULL; + for (i=0; i<nkwargs; i++) { + PyObject *key = __Pyx_PyTuple_GET_ITEM(kwnames, i); + #if !CYTHON_ASSUME_SAFE_MACROS + if (unlikely(!key)) goto bad; + #endif + if (unlikely(PyDict_SetItem(dict, key, kwvalues[i]) < 0)) goto bad; + } + return dict; +bad: + Py_DECREF(dict); + return NULL; +} +#else +#define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) +#endif +#endif + +/* UnpackUnboundCMethod */ +static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) { + PyObject *result; + PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + if (unlikely(!selfless_args)) return NULL; + result = PyObject_Call(method, selfless_args, kwargs); + Py_DECREF(selfless_args); + return result; +} +static PyMethodDef __Pyx_UnboundCMethod_Def = { + "CythonUnboundCMethod", + __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall), + METH_VARARGS | METH_KEYWORDS, + NULL +}; +static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { + PyObject *method; + PyObject *result; + method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); + if (unlikely(!method)) + return -1; + result = method; +#if CYTHON_COMPILING_IN_CPYTHON + if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) + { + PyMethodDescrObject *descr = (PyMethodDescrObject*) method; + target->func = descr->d_method->ml_meth; + target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); + } else +#endif +#if CYTHON_COMPILING_IN_PYPY +#else + if (PyCFunction_Check(method)) +#endif + { + PyObject *self; + int self_found; +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + self = PyObject_GetAttrString(method, "__self__"); + if (!self) { + PyErr_Clear(); + } +#else + self = PyCFunction_GET_SELF(method); +#endif + self_found = (self && self != Py_None); +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + Py_XDECREF(self); +#endif + if (self_found) { + PyObject *unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method); + if (unlikely(!unbound_method)) return -1; + Py_DECREF(method); + result = unbound_method; + } + } +#if !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + if (unlikely(target->method)) { + Py_DECREF(result); + } else +#endif + target->method = result; + return 0; +} + +/* CallUnboundCMethod0 */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + int was_initialized = __Pyx_CachedCFunction_GetAndSetInitializing(cfunc); + if (likely(was_initialized == 2 && cfunc->func)) { + if (likely(cfunc->flag == METH_NOARGS)) + return __Pyx_CallCFunction(cfunc, self, NULL); + if (likely(cfunc->flag == METH_FASTCALL)) + return __Pyx_CallCFunctionFast(cfunc, self, NULL, 0); + if (cfunc->flag == (METH_FASTCALL |
METH_KEYWORDS)) + return __Pyx_CallCFunctionFastWithKeywords(cfunc, self, NULL, 0, NULL); + if (likely(cfunc->flag == (METH_VARARGS | METH_KEYWORDS))) + return __Pyx_CallCFunctionWithKeywords(cfunc, self, __pyx_mstate_global->__pyx_empty_tuple, NULL); + if (cfunc->flag == METH_VARARGS) + return __Pyx_CallCFunction(cfunc, self, __pyx_mstate_global->__pyx_empty_tuple); + return __Pyx__CallUnboundCMethod0(cfunc, self); + } +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + else if (unlikely(was_initialized == 1)) { + __Pyx_CachedCFunction tmp_cfunc = { +#ifndef __cplusplus + 0 +#endif + }; + tmp_cfunc.type = cfunc->type; + tmp_cfunc.method_name = cfunc->method_name; + return __Pyx__CallUnboundCMethod0(&tmp_cfunc, self); + } +#endif + PyObject *result = __Pyx__CallUnboundCMethod0(cfunc, self); + __Pyx_CachedCFunction_SetFinishedInitializing(cfunc); + return result; +} +#endif +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *result; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; + result = __Pyx_PyObject_CallOneArg(cfunc->method, self); + return result; +} + +/* py_dict_items (used by OwnedDictNext) */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) { + return __Pyx_CallUnboundCMethod0(&__pyx_mstate_global->__pyx_umethod_PyDict_Type_items, d); +} + +/* py_dict_values (used by OwnedDictNext) */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { + return __Pyx_CallUnboundCMethod0(&__pyx_mstate_global->__pyx_umethod_PyDict_Type_values, d); +} + +/* OwnedDictNext (used by ParseKeywordsImpl) */ +#if CYTHON_AVOID_BORROWED_REFS +static int __Pyx_PyDict_NextRef(PyObject *p, PyObject **ppos, PyObject **pkey, PyObject **pvalue) { + PyObject *next = NULL; + if (!*ppos) { + if (pvalue) { + PyObject *dictview = pkey ? 
__Pyx_PyDict_Items(p) : __Pyx_PyDict_Values(p); + if (unlikely(!dictview)) goto bad; + *ppos = PyObject_GetIter(dictview); + Py_DECREF(dictview); + } else { + *ppos = PyObject_GetIter(p); + } + if (unlikely(!*ppos)) goto bad; + } + next = PyIter_Next(*ppos); + if (!next) { + if (PyErr_Occurred()) goto bad; + return 0; + } + if (pkey && pvalue) { + *pkey = __Pyx_PySequence_ITEM(next, 0); + if (unlikely(!*pkey)) goto bad; + *pvalue = __Pyx_PySequence_ITEM(next, 1); + if (unlikely(!*pvalue)) goto bad; + Py_DECREF(next); + } else if (pkey) { + *pkey = next; + } else { + assert(pvalue); + *pvalue = next; + } + return 1; + bad: + Py_XDECREF(next); +#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d0000 + PyErr_FormatUnraisable("Exception ignored in __Pyx_PyDict_NextRef"); +#else + PyErr_WriteUnraisable(__pyx_mstate_global->__pyx_n_u_Pyx_PyDict_NextRef); +#endif + if (pkey) *pkey = NULL; + if (pvalue) *pvalue = NULL; + return 0; +} +#else // !CYTHON_AVOID_BORROWED_REFS +static int __Pyx_PyDict_NextRef(PyObject *p, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue) { + int result = PyDict_Next(p, ppos, pkey, pvalue); + if (likely(result == 1)) { + if (pkey) Py_INCREF(*pkey); + if (pvalue) Py_INCREF(*pvalue); + } + return result; +} +#endif + +/* RaiseDoubleKeywords (used by ParseKeywordsImpl) */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); +} + +/* CallUnboundCMethod2 */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) { + int was_initialized = __Pyx_CachedCFunction_GetAndSetInitializing(cfunc); + if (likely(was_initialized == 2 && cfunc->func)) { + PyObject *args[2] = {arg1, arg2}; + if (cfunc->flag == METH_FASTCALL) { + return __Pyx_CallCFunctionFast(cfunc, self, args, 2); + } + if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS)) + return __Pyx_CallCFunctionFastWithKeywords(cfunc, self, args, 2, NULL); + } +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + else if (unlikely(was_initialized == 1)) { + __Pyx_CachedCFunction tmp_cfunc = { +#ifndef __cplusplus + 0 +#endif + }; + tmp_cfunc.type = cfunc->type; + tmp_cfunc.method_name = cfunc->method_name; + return __Pyx__CallUnboundCMethod2(&tmp_cfunc, self, arg1, arg2); + } +#endif + PyObject *result = __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2); + __Pyx_CachedCFunction_SetFinishedInitializing(cfunc); + return result; +} +#endif +static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){ + if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_COMPILING_IN_CPYTHON + if (cfunc->func && (cfunc->flag & METH_VARARGS)) { + PyObject *result = NULL; + PyObject *args = PyTuple_New(2); + if (unlikely(!args)) return NULL; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + if (cfunc->flag & METH_KEYWORDS) + result = __Pyx_CallCFunctionWithKeywords(cfunc, self, args, NULL); + else + result = __Pyx_CallCFunction(cfunc, self, args); + Py_DECREF(args); + return result; + } +#endif + { + PyObject *args[4] = {NULL, self, arg1, arg2}; + return __Pyx_PyObject_FastCall(cfunc->method, args+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); + } +} + +/* ParseKeywordsImpl (used by ParseKeywords)
*/ +static int __Pyx_ValidateDuplicatePosArgs( + PyObject *kwds, + PyObject ** const argnames[], + PyObject ** const *first_kw_arg, + const char* function_name) +{ + PyObject ** const *name = argnames; + while (name != first_kw_arg) { + PyObject *key = **name; + int found = PyDict_Contains(kwds, key); + if (unlikely(found)) { + if (found == 1) __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; + } + name++; + } + return 0; +bad: + return -1; +} +#if CYTHON_USE_UNICODE_INTERNALS +static CYTHON_INLINE int __Pyx_UnicodeKeywordsEqual(PyObject *s1, PyObject *s2) { + int kind; + Py_ssize_t len = PyUnicode_GET_LENGTH(s1); + if (len != PyUnicode_GET_LENGTH(s2)) return 0; + kind = PyUnicode_KIND(s1); + if (kind != PyUnicode_KIND(s2)) return 0; + const void *data1 = PyUnicode_DATA(s1); + const void *data2 = PyUnicode_DATA(s2); + return (memcmp(data1, data2, (size_t) len * (size_t) kind) == 0); +} +#endif +static int __Pyx_MatchKeywordArg_str( + PyObject *key, + PyObject ** const argnames[], + PyObject ** const *first_kw_arg, + size_t *index_found, + const char *function_name) +{ + PyObject ** const *name; + #if CYTHON_USE_UNICODE_INTERNALS + Py_hash_t key_hash = ((PyASCIIObject*)key)->hash; + if (unlikely(key_hash == -1)) { + key_hash = PyObject_Hash(key); + if (unlikely(key_hash == -1)) + goto bad; + } + #endif + name = first_kw_arg; + while (*name) { + PyObject *name_str = **name; + #if CYTHON_USE_UNICODE_INTERNALS + if (key_hash == ((PyASCIIObject*)name_str)->hash && __Pyx_UnicodeKeywordsEqual(name_str, key)) { + *index_found = (size_t) (name - argnames); + return 1; + } + #else + #if CYTHON_ASSUME_SAFE_SIZE + if (PyUnicode_GET_LENGTH(name_str) == PyUnicode_GET_LENGTH(key)) + #endif + { + int cmp = PyUnicode_Compare(name_str, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + *index_found = (size_t) (name - argnames); + return 1; + } + } + #endif + name++; + } + name = argnames; + while (name != first_kw_arg) { + PyObject *name_str = **name; + #if CYTHON_USE_UNICODE_INTERNALS + if (unlikely(key_hash == ((PyASCIIObject*)name_str)->hash)) { + if (__Pyx_UnicodeKeywordsEqual(name_str, key)) + goto arg_passed_twice; + } + #else + #if CYTHON_ASSUME_SAFE_SIZE + if (PyUnicode_GET_LENGTH(name_str) == PyUnicode_GET_LENGTH(key)) + #endif + { + if (unlikely(name_str == key)) goto arg_passed_twice; + int cmp = PyUnicode_Compare(name_str, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + } + #endif + name++; + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +bad: + return -1; +} +static int __Pyx_MatchKeywordArg_nostr( + PyObject *key, + PyObject ** const argnames[], + PyObject ** const *first_kw_arg, + size_t *index_found, + const char *function_name) +{ + PyObject ** const *name; + if (unlikely(!PyUnicode_Check(key))) goto invalid_keyword_type; + name = first_kw_arg; + while (*name) { + int cmp = PyObject_RichCompareBool(**name, key, Py_EQ); + if (cmp == 1) { + *index_found = (size_t) (name - argnames); + return 1; + } + if (unlikely(cmp == -1)) goto bad; + name++; + } + name = argnames; + while (name != first_kw_arg) { + int cmp = PyObject_RichCompareBool(**name, key, Py_EQ); + if (unlikely(cmp != 0)) { + if (cmp == 1) goto arg_passed_twice; + else goto bad; + } + name++; + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", 
function_name); + goto bad; +bad: + return -1; +} +static CYTHON_INLINE int __Pyx_MatchKeywordArg( + PyObject *key, + PyObject ** const argnames[], + PyObject ** const *first_kw_arg, + size_t *index_found, + const char *function_name) +{ + return likely(PyUnicode_CheckExact(key)) ? + __Pyx_MatchKeywordArg_str(key, argnames, first_kw_arg, index_found, function_name) : + __Pyx_MatchKeywordArg_nostr(key, argnames, first_kw_arg, index_found, function_name); +} +static void __Pyx_RejectUnknownKeyword( + PyObject *kwds, + PyObject ** const argnames[], + PyObject ** const *first_kw_arg, + const char *function_name) +{ + #if CYTHON_AVOID_BORROWED_REFS + PyObject *pos = NULL; + #else + Py_ssize_t pos = 0; + #endif + PyObject *key = NULL; + __Pyx_BEGIN_CRITICAL_SECTION(kwds); + while ( + #if CYTHON_AVOID_BORROWED_REFS + __Pyx_PyDict_NextRef(kwds, &pos, &key, NULL) + #else + PyDict_Next(kwds, &pos, &key, NULL) + #endif + ) { + PyObject** const *name = first_kw_arg; + while (*name && (**name != key)) name++; + if (!*name) { + size_t index_found = 0; + int cmp = __Pyx_MatchKeywordArg(key, argnames, first_kw_arg, &index_found, function_name); + if (cmp != 1) { + if (cmp == 0) { + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(key); + #endif + break; + } + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(key); + #endif + } + __Pyx_END_CRITICAL_SECTION(); + #if CYTHON_AVOID_BORROWED_REFS + Py_XDECREF(pos); + #endif + assert(PyErr_Occurred()); +} +static int __Pyx_ParseKeywordDict( + PyObject *kwds, + PyObject ** const argnames[], + PyObject *values[], + Py_ssize_t num_pos_args, + Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs) +{ + PyObject** const *name; + PyObject** const *first_kw_arg = argnames + num_pos_args; + Py_ssize_t extracted = 0; +#if !CYTHON_COMPILING_IN_PYPY || defined(PyArg_ValidateKeywordArguments) + if (unlikely(!PyArg_ValidateKeywordArguments(kwds))) return -1; +#endif + name = first_kw_arg; + while (*name && num_kwargs > extracted) { + PyObject * key = **name; + PyObject *value; + int found = 0; + #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000 + found = PyDict_GetItemRef(kwds, key, &value); + #else + value = PyDict_GetItemWithError(kwds, key); + if (value) { + Py_INCREF(value); + found = 1; + } else { + if (unlikely(PyErr_Occurred())) goto bad; + } + #endif + if (found) { + if (unlikely(found < 0)) goto bad; + values[name-argnames] = value; + extracted++; + } + name++; + } + if (num_kwargs > extracted) { + if (ignore_unknown_kwargs) { + if (unlikely(__Pyx_ValidateDuplicatePosArgs(kwds, argnames, first_kw_arg, function_name) == -1)) + goto bad; + } else { + __Pyx_RejectUnknownKeyword(kwds, argnames, first_kw_arg, function_name); + goto bad; + } + } + return 0; +bad: + return -1; +} +static int __Pyx_ParseKeywordDictToDict( + PyObject *kwds, + PyObject ** const argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject** const *name; + PyObject** const *first_kw_arg = argnames + num_pos_args; + Py_ssize_t len; +#if !CYTHON_COMPILING_IN_PYPY || defined(PyArg_ValidateKeywordArguments) + if (unlikely(!PyArg_ValidateKeywordArguments(kwds))) return -1; +#endif + if (PyDict_Update(kwds2, kwds) < 0) goto bad; + name = first_kw_arg; + while (*name) { + PyObject *key = **name; + PyObject *value; +#if !CYTHON_COMPILING_IN_LIMITED_API && (PY_VERSION_HEX >= 0x030d00A2 || defined(PyDict_Pop)) + int found = 
PyDict_Pop(kwds2, key, &value); + if (found) { + if (unlikely(found < 0)) goto bad; + values[name-argnames] = value; + } +#elif __PYX_LIMITED_VERSION_HEX >= 0x030d0000 + int found = PyDict_GetItemRef(kwds2, key, &value); + if (found) { + if (unlikely(found < 0)) goto bad; + values[name-argnames] = value; + if (unlikely(PyDict_DelItem(kwds2, key) < 0)) goto bad; + } +#else + #if CYTHON_COMPILING_IN_CPYTHON + value = _PyDict_Pop(kwds2, key, kwds2); + #else + value = __Pyx_CallUnboundCMethod2(&__pyx_mstate_global->__pyx_umethod_PyDict_Type_pop, kwds2, key, kwds2); + #endif + if (value == kwds2) { + Py_DECREF(value); + } else { + if (unlikely(!value)) goto bad; + values[name-argnames] = value; + } +#endif + name++; + } + len = PyDict_Size(kwds2); + if (len > 0) { + return __Pyx_ValidateDuplicatePosArgs(kwds, argnames, first_kw_arg, function_name); + } else if (unlikely(len == -1)) { + goto bad; + } + return 0; +bad: + return -1; +} +static int __Pyx_ParseKeywordsTuple( + PyObject *kwds, + PyObject * const *kwvalues, + PyObject ** const argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs) +{ + PyObject *key = NULL; + PyObject** const * name; + PyObject** const *first_kw_arg = argnames + num_pos_args; + for (Py_ssize_t pos = 0; pos < num_kwargs; pos++) { +#if CYTHON_AVOID_BORROWED_REFS + key = __Pyx_PySequence_ITEM(kwds, pos); +#else + key = __Pyx_PyTuple_GET_ITEM(kwds, pos); +#endif +#if !CYTHON_ASSUME_SAFE_MACROS + if (unlikely(!key)) goto bad; +#endif + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + PyObject *value = kwvalues[pos]; + values[name-argnames] = __Pyx_NewRef(value); + } else { + size_t index_found = 0; + int cmp = __Pyx_MatchKeywordArg(key, argnames, first_kw_arg, &index_found, function_name); + if (cmp == 1) { + PyObject *value = kwvalues[pos]; + values[index_found] = __Pyx_NewRef(value); + } else { + if (unlikely(cmp == -1)) goto bad; + if (kwds2) { + PyObject *value = kwvalues[pos]; + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else if (!ignore_unknown_kwargs) { + goto invalid_keyword; + } + } + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(key); + key = NULL; + #endif + } + return 0; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + goto bad; +bad: + #if CYTHON_AVOID_BORROWED_REFS + Py_XDECREF(key); + #endif + return -1; +} + +/* ParseKeywords */ +static int __Pyx_ParseKeywords( + PyObject *kwds, + PyObject * const *kwvalues, + PyObject ** const argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + Py_ssize_t num_kwargs, + const char* function_name, + int ignore_unknown_kwargs) +{ + if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds))) + return __Pyx_ParseKeywordsTuple(kwds, kwvalues, argnames, kwds2, values, num_pos_args, num_kwargs, function_name, ignore_unknown_kwargs); + else if (kwds2) + return __Pyx_ParseKeywordDictToDict(kwds, argnames, kwds2, values, num_pos_args, function_name); + else + return __Pyx_ParseKeywordDict(kwds, argnames, values, num_pos_args, num_kwargs, function_name, ignore_unknown_kwargs); +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + 
more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* GetException (used by pep479) */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type = NULL, *local_value, *local_tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030C0000 + local_value = tstate->current_exception; + tstate->current_exception = 0; + #else + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + #endif +#elif __PYX_LIMITED_VERSION_HEX > 0x030C0000 + local_value = PyErr_GetRaisedException(); +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif +#if __PYX_LIMITED_VERSION_HEX > 0x030C0000 + if (likely(local_value)) { + local_type = (PyObject*) Py_TYPE(local_value); + Py_INCREF(local_type); + local_tb = PyException_GetTraceback(local_value); + } +#else + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } +#endif // __PYX_LIMITED_VERSION_HEX > 0x030C0000 + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + #if PY_VERSION_HEX >= 0x030B00a4 + tmp_value = exc_info->exc_value; + exc_info->exc_value = local_value; + tmp_type = NULL; + tmp_tb = NULL; + Py_XDECREF(local_type); + Py_XDECREF(local_tb); + #else + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + #endif + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#elif __PYX_LIMITED_VERSION_HEX >= 0x030b0000 + PyErr_SetHandledException(local_value); + Py_XDECREF(local_value); + Py_XDECREF(local_type); + Py_XDECREF(local_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +#if __PYX_LIMITED_VERSION_HEX <= 0x030C0000 +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +#endif +} + +/* pep479 */ +static void __Pyx_Generator_Replace_StopIteration(int in_async_gen) { + PyObject *exc, *val, *tb, *cur_exc, *new_exc; + __Pyx_PyThreadState_declare + int is_async_stopiteration = 0; + CYTHON_MAYBE_UNUSED_VAR(in_async_gen); + __Pyx_PyThreadState_assign + cur_exc = 
__Pyx_PyErr_CurrentExceptionType(); + if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) { + if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopAsyncIteration))) { + is_async_stopiteration = 1; + } else { + return; + } + } + __Pyx_GetException(&exc, &val, &tb); + Py_XDECREF(exc); + Py_XDECREF(tb); + new_exc = PyObject_CallFunction(PyExc_RuntimeError, "s", + is_async_stopiteration ? "async generator raised StopAsyncIteration" : + in_async_gen ? "async generator raised StopIteration" : + "generator raised StopIteration"); + if (!new_exc) { + Py_XDECREF(val); + return; + } + PyException_SetCause(new_exc, val); // steals ref to val + PyErr_SetObject(PyExc_RuntimeError, new_exc); +} + +/* GetTopmostException (used by SaveResetException) */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + PyObject *exc_value = exc_info->exc_value; + if (exc_value == NULL || exc_value == Py_None) { + *value = NULL; + *type = NULL; + *tb = NULL; + } else { + *value = exc_value; + Py_INCREF(*value); + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + *tb = PyException_GetTraceback(exc_value); + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #endif +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + PyObject *tmp_value = exc_info->exc_value; + exc_info->exc_value = value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); + #else + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); + #endif +} +#endif + +/* IterNextPlain (used by IterNext) */ +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 +static PyObject *__Pyx_GetBuiltinNext_LimitedAPI(void) { + if (unlikely(!__pyx_mstate_global->__Pyx_GetBuiltinNext_LimitedAPI_cache)) + __pyx_mstate_global->__Pyx_GetBuiltinNext_LimitedAPI_cache = 
__Pyx_GetBuiltinName(__pyx_mstate_global->__pyx_n_u_next); + return __pyx_mstate_global->__Pyx_GetBuiltinNext_LimitedAPI_cache; +} +#endif +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next_Plain(PyObject *iterator) { +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 + PyObject *result; + PyObject *next = __Pyx_GetBuiltinNext_LimitedAPI(); + if (unlikely(!next)) return NULL; + result = PyObject_CallFunctionObjArgs(next, iterator, NULL); + return result; +#else + (void)__Pyx_GetBuiltinName; // only for early limited API + iternextfunc iternext = __Pyx_PyObject_GetIterNextFunc(iterator); + assert(iternext); + return iternext(iterator); +#endif +} + +/* IterNext */ +static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(defval); + return defval; + } + if (defval) { + Py_INCREF(defval); + return defval; + } + __Pyx_PyErr_SetNone(PyExc_StopIteration); + return NULL; +} +static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) { + __Pyx_TypeName iterator_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(iterator)); + PyErr_Format(PyExc_TypeError, + __Pyx_FMT_TYPENAME " object is not an iterator", iterator_type_name); + __Pyx_DECREF_TypeName(iterator_type_name); +} +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) { + PyObject* next; +#if !CYTHON_COMPILING_IN_LIMITED_API + iternextfunc iternext = __Pyx_PyObject_TryGetSlot(iterator, tp_iternext, iternextfunc); + if (likely(iternext)) { + next = iternext(iterator); + if (likely(next)) + return next; + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + if (unlikely(iternext == &_PyObject_NextNotImplemented)) + return NULL; + #endif + } else if (CYTHON_USE_TYPE_SLOTS) { + __Pyx_PyIter_Next_ErrorNoIterator(iterator); + return NULL; + } else +#endif + if (unlikely(!PyIter_Check(iterator))) { + __Pyx_PyIter_Next_ErrorNoIterator(iterator); + return NULL; + } else { + next = defval ? PyIter_Next(iterator) : __Pyx_PyIter_Next_Plain(iterator); + if (likely(next)) + return next; + } + return __Pyx_PyIter_Next2Default(defval); +} + +/* PyLongBinop */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_Fallback___Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, int inplace) { + return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject* __Pyx_Unpacked___Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { + CYTHON_MAYBE_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + const long b = intval; + long a; + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla; + if (unlikely(__Pyx_PyLong_IsZero(op1))) { + return __Pyx_NewRef(op2); + } + const int is_positive = __Pyx_PyLong_IsPos(op1); + const digit* digits = __Pyx_PyLong_Digits(op1); + const Py_ssize_t size = __Pyx_PyLong_DigitCount(op1); + if (likely(size == 1)) { + a = (long) digits[0]; + if (!is_positive) a *= -1; + } else { + switch (size) { + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if (!is_positive) a *= -1; + goto calculate_long; + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + if (!is_positive) lla *= -1; + goto calculate_long_long; + } + break; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if (!is_positive) a *= -1; + goto calculate_long; + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + if (!is_positive) lla *= -1; + goto calculate_long_long; + } + break; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if (!is_positive) a *= -1; + goto calculate_long; + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + if (!is_positive) lla *= -1; + goto calculate_long_long; + } + break; + } + return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + calculate_long: + { + long x; + x = a + b; + return PyLong_FromLong(x); + } + calculate_long_long: + { + PY_LONG_LONG llx; + llx = lla + llb; + return PyLong_FromLongLong(llx); + } + +} +#endif +static PyObject* __Pyx_Float___Pyx_PyLong_AddObjC(PyObject *float_val, long intval, int zerodivision_check) { + CYTHON_UNUSED_VAR(zerodivision_check); + const long b = intval; + double a = __Pyx_PyFloat_AS_DOUBLE(float_val); + double result; + + result = ((double)a) + (double)b; + return PyFloat_FromDouble(result); +} +static CYTHON_INLINE PyObject* __Pyx_PyLong_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_UNUSED_VAR(zerodivision_check); + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + return __Pyx_Unpacked___Pyx_PyLong_AddObjC(op1, op2, intval, inplace, zerodivision_check); + } + #endif + if (PyFloat_CheckExact(op1)) { + return __Pyx_Float___Pyx_PyLong_AddObjC(op1, intval, zerodivision_check); + } + return __Pyx_Fallback___Pyx_PyLong_AddObjC(op1, op2, inplace); +} +#endif + 
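+/* Note on the PyLongBinop fast path above: CPython stores an int's magnitude as an array of base-2**PyLong_SHIFT "digits" (PyLong_SHIFT is 15 or 30 depending on the build). __Pyx_Unpacked___Pyx_PyLong_AddObjC recombines up to four digits into a plain C long or long long, e.g. a two-digit value becomes ((unsigned long)digits[1] << PyLong_SHIFT) | (unsigned long)digits[0], applies the operand's sign, and performs the addition in C; the compile-time guards of the form (8 * sizeof(long) - 1 > k * PyLong_SHIFT) ensure the recombined magnitude fits the chosen C type, and anything wider falls back to PyLong_Type.tp_as_number->nb_add. */ +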
+/* RaiseException */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyException_SetTraceback(value, tb); +#elif CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} + +/* SetItemInt */ +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { + int r; + if (unlikely(!j)) return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, + int wraparound, int boundscheck, int unsafe_shared) { + CYTHON_MAYBE_UNUSED_VAR(unsafe_shared); +#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o)); + if ((CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS && !__Pyx_IS_UNIQUELY_REFERENCED(o, unsafe_shared))) { + Py_INCREF(v); + return PyList_SetItem(o, n, v); + } else if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { + PyObject* old; + Py_INCREF(v); + old = PyList_GET_ITEM(o, n); + PyList_SET_ITEM(o, n, v); + Py_DECREF(old); + return 0; + } + } else +#endif +#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY + { + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (!is_list && mm && mm->mp_ass_subscript) { + int r; + PyObject *key = PyLong_FromSsize_t(i); + if (unlikely(!key)) return -1; + r = mm->mp_ass_subscript(o, key, v); + Py_DECREF(key); + return r; + } + if (is_list || likely(sm && sm->sq_ass_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return -1; + PyErr_Clear(); + } + } + return sm->sq_ass_item(o, i, v); + } + } +#else + if (is_list || !PyMapping_Check(o)) { + return PySequence_SetItem(o, i, v); + } +#endif + (void)wraparound; + (void)boundscheck; + return __Pyx_SetItemInt_Generic(o, PyLong_FromSsize_t(i), v); +} + +/* ModInt[long] */ +static CYTHON_INLINE long __Pyx_mod_long(long a, long b, int b_is_constant) { + long r = a % b; + long adapt_python = (b_is_constant ? + ((r != 0) & ((r < 0) ^ (b < 0))) : + ((r != 0) & ((r ^ b) < 0)) + ); + return r + adapt_python * b; +} + +/* AllocateExtensionType */ +static PyObject *__Pyx_AllocateExtensionType(PyTypeObject *t, int is_final) { + if (is_final || likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { + allocfunc alloc_func = __Pyx_PyType_GetSlot(t, tp_alloc, allocfunc); + return alloc_func(t, 0); + } else { + newfunc tp_new = __Pyx_PyType_TryGetSlot(&PyBaseObject_Type, tp_new, newfunc); + #if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 + if (!tp_new) { + PyObject *new_str = PyUnicode_FromString("__new__"); + if (likely(new_str)) { + PyObject *o = PyObject_CallMethodObjArgs((PyObject *)&PyBaseObject_Type, new_str, t, NULL); + Py_DECREF(new_str); + return o; + } else + return NULL; + } else + #endif + return tp_new(t, __pyx_mstate_global->__pyx_empty_tuple, 0); + } +} + +/* LimitedApiGetTypeDict (used by SetItemOnTypeDict) */ +#if CYTHON_COMPILING_IN_LIMITED_API +static Py_ssize_t __Pyx_GetTypeDictOffset(void) { + PyObject *tp_dictoffset_o; + Py_ssize_t tp_dictoffset; + tp_dictoffset_o = PyObject_GetAttrString((PyObject*)(&PyType_Type), "__dictoffset__"); + if (unlikely(!tp_dictoffset_o)) return -1; + tp_dictoffset = PyLong_AsSsize_t(tp_dictoffset_o); + Py_DECREF(tp_dictoffset_o); + if (unlikely(tp_dictoffset == 0)) { + PyErr_SetString( + PyExc_TypeError, + "'type' doesn't have a dictoffset"); + return -1; + } else if (unlikely(tp_dictoffset < 0)) { + PyErr_SetString( + PyExc_TypeError, + "'type' has an unexpected negative dictoffset. " + "Please report this as Cython bug"); + return -1; + } + return tp_dictoffset; +} +static PyObject *__Pyx_GetTypeDict(PyTypeObject *tp) { + static Py_ssize_t tp_dictoffset = 0; + if (unlikely(tp_dictoffset == 0)) { + tp_dictoffset = __Pyx_GetTypeDictOffset(); + if (unlikely(tp_dictoffset == -1 && PyErr_Occurred())) { + tp_dictoffset = 0; // try again next time? 
+ return NULL; + } + } + return *(PyObject**)((char*)tp + tp_dictoffset); +} +#endif + +/* SetItemOnTypeDict (used by FixUpExtensionType) */ +static int __Pyx__SetItemOnTypeDict(PyTypeObject *tp, PyObject *k, PyObject *v) { + int result; + PyObject *tp_dict; +#if CYTHON_COMPILING_IN_LIMITED_API + tp_dict = __Pyx_GetTypeDict(tp); + if (unlikely(!tp_dict)) return -1; +#else + tp_dict = tp->tp_dict; +#endif + result = PyDict_SetItem(tp_dict, k, v); + if (likely(!result)) { + PyType_Modified(tp); + if (unlikely(PyObject_HasAttr(v, __pyx_mstate_global->__pyx_n_u_set_name))) { + PyObject *setNameResult = PyObject_CallMethodObjArgs(v, __pyx_mstate_global->__pyx_n_u_set_name, (PyObject *) tp, k, NULL); + if (!setNameResult) return -1; + Py_DECREF(setNameResult); + } + } + return result; +} + +/* FixUpExtensionType */ +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { +#if __PYX_LIMITED_VERSION_HEX > 0x030900B1 + CYTHON_UNUSED_VAR(spec); + CYTHON_UNUSED_VAR(type); + CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict); +#else + const PyType_Slot *slot = spec->slots; + int changed = 0; +#if !CYTHON_COMPILING_IN_LIMITED_API + while (slot && slot->slot && slot->slot != Py_tp_members) + slot++; + if (slot && slot->slot == Py_tp_members) { +#if !CYTHON_COMPILING_IN_CPYTHON + const +#endif // !CYTHON_COMPILING_IN_CPYTHON) + PyMemberDef *memb = (PyMemberDef*) slot->pfunc; + while (memb && memb->name) { + if (memb->name[0] == '_' && memb->name[1] == '_') { + if (strcmp(memb->name, "__weaklistoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_weaklistoffset = memb->offset; + changed = 1; + } + else if (strcmp(memb->name, "__dictoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_dictoffset = memb->offset; + changed = 1; + } +#if CYTHON_METH_FASTCALL + else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_vectorcall_offset = memb->offset; + changed = 1; + } +#endif // CYTHON_METH_FASTCALL +#if !CYTHON_COMPILING_IN_PYPY + else if (strcmp(memb->name, "__module__") == 0) { + PyObject *descr; + assert(memb->type == T_OBJECT); + assert(memb->flags == 0 || memb->flags == READONLY); + descr = PyDescr_NewMember(type, memb); + if (unlikely(!descr)) + return -1; + int set_item_result = PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr); + Py_DECREF(descr); + if (unlikely(set_item_result < 0)) { + return -1; + } + changed = 1; + } +#endif // !CYTHON_COMPILING_IN_PYPY + } + memb++; + } + } +#endif // !CYTHON_COMPILING_IN_LIMITED_API +#if !CYTHON_COMPILING_IN_PYPY + slot = spec->slots; + while (slot && slot->slot && slot->slot != Py_tp_getset) + slot++; + if (slot && slot->slot == Py_tp_getset) { + PyGetSetDef *getset = (PyGetSetDef*) slot->pfunc; + while (getset && getset->name) { + if (getset->name[0] == '_' && getset->name[1] == '_' && strcmp(getset->name, "__module__") == 0) { + PyObject *descr = PyDescr_NewGetSet(type, getset); + if (unlikely(!descr)) + return -1; + #if CYTHON_COMPILING_IN_LIMITED_API + PyObject *pyname = PyUnicode_FromString(getset->name); + if (unlikely(!pyname)) { + Py_DECREF(descr); + return -1; + } + int set_item_result = __Pyx_SetItemOnTypeDict(type, pyname, descr); + Py_DECREF(pyname); + #else + CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict); + int set_item_result = PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr); + #endif + Py_DECREF(descr); + if 
(unlikely(set_item_result < 0)) { + return -1; + } + changed = 1; + } + ++getset; + } + } +#else + CYTHON_UNUSED_VAR(__Pyx__SetItemOnTypeDict); +#endif // !CYTHON_COMPILING_IN_PYPY + if (changed) + PyType_Modified(type); +#endif // PY_VERSION_HEX > 0x030900B1 + return 0; +} + +/* PyObjectCallNoArg (used by PyObjectCallMethod0) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { + PyObject *arg[2] = {NULL, NULL}; + return __Pyx_PyObject_FastCall(func, arg + 1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* PyObjectGetMethod (used by PyObjectCallMethod0) */ +#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000))) +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP + __Pyx_TypeName type_name; + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + assert (*method == NULL); + if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); +#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR + if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) +#else + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + if (meth_found) { + *method = descr; + return 1; + } + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + if (likely(descr != NULL)) { + *method = descr; + return 0; + } + type_name = __Pyx_PyType_GetFullyQualifiedName(tp); + PyErr_Format(PyExc_AttributeError, + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, name); + __Pyx_DECREF_TypeName(type_name); + return 0; +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif +try_unpack: +#if CYTHON_UNPACK_METHODS + if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} +#endif + +/* PyObjectCallMethod0 (used by PyType_Ready) */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { +#if CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000)) + PyObject *args[1] = {obj}; + (void) __Pyx_PyObject_CallOneArg; + (void) __Pyx_PyObject_CallNoArg; + 
return PyObject_VectorcallMethod(method_name, args, 1 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL); +#else + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; + result = __Pyx_PyObject_CallNoArg(method); + Py_DECREF(method); +bad: + return result; +#endif +} + +/* ValidateBasesTuple (used by PyType_Ready) */ +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS +static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) { + Py_ssize_t i, n; +#if CYTHON_ASSUME_SAFE_SIZE + n = PyTuple_GET_SIZE(bases); +#else + n = PyTuple_Size(bases); + if (unlikely(n < 0)) return -1; +#endif + for (i = 1; i < n; i++) + { + PyTypeObject *b; +#if CYTHON_AVOID_BORROWED_REFS + PyObject *b0 = PySequence_GetItem(bases, i); + if (!b0) return -1; +#elif CYTHON_ASSUME_SAFE_MACROS + PyObject *b0 = PyTuple_GET_ITEM(bases, i); +#else + PyObject *b0 = PyTuple_GetItem(bases, i); + if (!b0) return -1; +#endif + b = (PyTypeObject*) b0; + if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE)) + { + __Pyx_TypeName b_name = __Pyx_PyType_GetFullyQualifiedName(b); + PyErr_Format(PyExc_TypeError, + "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name); + __Pyx_DECREF_TypeName(b_name); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } + if (dictoffset == 0) + { + Py_ssize_t b_dictoffset = 0; +#if CYTHON_USE_TYPE_SLOTS + b_dictoffset = b->tp_dictoffset; +#else + PyObject *py_b_dictoffset = PyObject_GetAttrString((PyObject*)b, "__dictoffset__"); + if (!py_b_dictoffset) goto dictoffset_return; + b_dictoffset = PyLong_AsSsize_t(py_b_dictoffset); + Py_DECREF(py_b_dictoffset); + if (b_dictoffset == -1 && PyErr_Occurred()) goto dictoffset_return; +#endif + if (b_dictoffset) { + { + __Pyx_TypeName b_name = __Pyx_PyType_GetFullyQualifiedName(b); + PyErr_Format(PyExc_TypeError, + "extension type '%.200s' has no __dict__ slot, " + "but base type '" __Pyx_FMT_TYPENAME "' has: " + "either add 'cdef dict __dict__' to the extension type " + "or add '__slots__ = [...]' to the base type", + type_name, b_name); + __Pyx_DECREF_TypeName(b_name); + } +#if !CYTHON_USE_TYPE_SLOTS + dictoffset_return: +#endif +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } + } +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + } + return 0; +} +#endif + +/* PyType_Ready */ +CYTHON_UNUSED static int __Pyx_PyType_HasMultipleInheritance(PyTypeObject *t) { + while (t) { + PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); + if (bases) { + return 1; + } + t = __Pyx_PyType_GetSlot(t, tp_base, PyTypeObject*); + } + return 0; +} +static int __Pyx_PyType_Ready(PyTypeObject *t) { +#if CYTHON_USE_TYPE_SPECS || !CYTHON_COMPILING_IN_CPYTHON || defined(PYSTON_MAJOR_VERSION) + (void)__Pyx_PyObject_CallMethod0; +#if CYTHON_USE_TYPE_SPECS + (void)__Pyx_validate_bases_tuple; +#endif + return PyType_Ready(t); +#else + int r; + if (!__Pyx_PyType_HasMultipleInheritance(t)) { + return PyType_Ready(t); + } + PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); + if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1)) + return -1; +#if !defined(PYSTON_MAJOR_VERSION) + { + int gc_was_enabled; + #if PY_VERSION_HEX >= 0x030A00b1 + gc_was_enabled = PyGC_Disable(); + 
(void)__Pyx_PyObject_CallMethod0; + #else + PyObject *ret, *py_status; + PyObject *gc = NULL; + #if (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) &&\ + !CYTHON_COMPILING_IN_GRAAL + gc = PyImport_GetModule(__pyx_mstate_global->__pyx_kp_u_gc); + #endif + if (unlikely(!gc)) gc = PyImport_Import(__pyx_mstate_global->__pyx_kp_u_gc); + if (unlikely(!gc)) return -1; + py_status = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_isenabled); + if (unlikely(!py_status)) { + Py_DECREF(gc); + return -1; + } + gc_was_enabled = __Pyx_PyObject_IsTrue(py_status); + Py_DECREF(py_status); + if (gc_was_enabled > 0) { + ret = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_disable); + if (unlikely(!ret)) { + Py_DECREF(gc); + return -1; + } + Py_DECREF(ret); + } else if (unlikely(gc_was_enabled == -1)) { + Py_DECREF(gc); + return -1; + } + #endif + t->tp_flags |= Py_TPFLAGS_HEAPTYPE; +#if PY_VERSION_HEX >= 0x030A0000 + t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE; +#endif +#else + (void)__Pyx_PyObject_CallMethod0; +#endif + r = PyType_Ready(t); +#if !defined(PYSTON_MAJOR_VERSION) + t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE; + #if PY_VERSION_HEX >= 0x030A00b1 + if (gc_was_enabled) + PyGC_Enable(); + #else + if (gc_was_enabled) { + PyObject *tp, *v, *tb; + PyErr_Fetch(&tp, &v, &tb); + ret = __Pyx_PyObject_CallMethod0(gc, __pyx_mstate_global->__pyx_kp_u_enable); + if (likely(ret || r == -1)) { + Py_XDECREF(ret); + PyErr_Restore(tp, v, tb); + } else { + Py_XDECREF(tp); + Py_XDECREF(v); + Py_XDECREF(tb); + r = -1; + } + } + Py_DECREF(gc); + #endif + } +#endif + return r; +#endif +} + +/* HasAttr (used by ImportImpl) */ +#if __PYX_LIMITED_VERSION_HEX < 0x030d0000 +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!PyUnicode_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_PyObject_GetAttrStrNoError(o, n); + if (!r) { + return (unlikely(PyErr_Occurred())) ? 
-1 : 0;
+    } else {
+        Py_DECREF(r);
+        return 1;
+    }
+}
+#endif
+
+/* ImportImpl (used by Import) */
+static int __Pyx__Import_GetModule(PyObject *qualname, PyObject **module) {
+    PyObject *imported_module = PyImport_GetModule(qualname);
+    if (unlikely(!imported_module)) {
+        *module = NULL;
+        if (PyErr_Occurred()) {
+            return -1;
+        }
+        return 0;
+    }
+    *module = imported_module;
+    return 1;
+}
+static int __Pyx__Import_Lookup(PyObject *qualname, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject **module) {
+    PyObject *imported_module;
+    PyObject *top_level_package_name;
+    Py_ssize_t i;
+    int status, module_found;
+    Py_ssize_t dot_index;
+    module_found = __Pyx__Import_GetModule(qualname, &imported_module);
+    if (unlikely(!module_found || module_found == -1)) {
+        *module = NULL;
+        return module_found;
+    }
+    if (imported_names) {
+        for (i = 0; i < len_imported_names; i++) {
+            PyObject *imported_name = imported_names[i];
+#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
+            int has_imported_attribute = PyObject_HasAttr(imported_module, imported_name);
+#else
+            int has_imported_attribute = PyObject_HasAttrWithError(imported_module, imported_name);
+            if (unlikely(has_imported_attribute == -1)) goto error;
+#endif
+            if (!has_imported_attribute) {
+                goto not_found;
+            }
+        }
+        *module = imported_module;
+        return 1;
+    }
+    dot_index = PyUnicode_FindChar(qualname, '.', 0, PY_SSIZE_T_MAX, 1);
+    if (dot_index == -1) {
+        *module = imported_module;
+        return 1;
+    }
+    if (unlikely(dot_index == -2)) goto error;
+    top_level_package_name = PyUnicode_Substring(qualname, 0, dot_index);
+    if (unlikely(!top_level_package_name)) goto error;
+    Py_DECREF(imported_module);
+    status = __Pyx__Import_GetModule(top_level_package_name, module);
+    Py_DECREF(top_level_package_name);
+    return status;
+error:
+    Py_DECREF(imported_module);
+    *module = NULL;
+    return -1;
+not_found:
+    Py_DECREF(imported_module);
+    *module = NULL;
+    return 0;
+}
+static PyObject *__Pyx__Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, PyObject *qualname, PyObject *moddict, int level) {
+    PyObject *module = 0;
+    PyObject *empty_dict = 0;
+    PyObject *from_list = 0;
+    int module_found;
+    if (!qualname) {
+        qualname = name;
+    }
+    module_found = __Pyx__Import_Lookup(qualname, imported_names, len_imported_names, &module);
+    if (likely(module_found == 1)) {
+        return module;
+    } else if (unlikely(module_found == -1)) {
+        return NULL;
+    }
+    empty_dict = PyDict_New();
+    if (unlikely(!empty_dict))
+        goto bad;
+    if (imported_names) {
+#if CYTHON_COMPILING_IN_CPYTHON
+        from_list = __Pyx_PyList_FromArray(imported_names, len_imported_names);
+        if (unlikely(!from_list))
+            goto bad;
+#else
+        from_list = PyList_New(len_imported_names);
+        if (unlikely(!from_list)) goto bad;
+        for (Py_ssize_t i=0; i<len_imported_names; i++) {
+            PyObject *imported_name = imported_names[i];
+            Py_INCREF(imported_name);
+            if (unlikely(PyList_SetItem(from_list, i, imported_name) < 0)) goto bad;
+        }
+#endif
+    }
+    module = PyImport_ImportModuleLevelObject(name, moddict, empty_dict, from_list, level);
+bad:
+    Py_XDECREF(empty_dict);
+    Py_XDECREF(from_list);
+    return module;
+}
+
+/* Import */
+static CYTHON_INLINE PyObject *__Pyx_Import(PyObject *name, PyObject *const *imported_names, Py_ssize_t len_imported_names, int level) {
+    return __Pyx__Import(name, imported_names, len_imported_names, NULL, __pyx_mstate_global->__pyx_d, level);
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+        const char* module_name_str = 0;
+        PyObject* module_name = 0;
+        PyObject* module_dot = 0;
+        PyObject* full_name = 0;
+        PyErr_Clear();
+        module_name_str = PyModule_GetName(module);
+        if (unlikely(!module_name_str)) { goto modbad; }
+        module_name = PyUnicode_FromString(module_name_str);
+        if (unlikely(!module_name)) { goto modbad; }
+        module_dot = PyUnicode_Concat(module_name, __pyx_mstate_global->__pyx_kp_u_);
+        if (unlikely(!module_dot)) { goto modbad; }
+        full_name = PyUnicode_Concat(module_dot, name);
+        if (unlikely(!full_name)) { goto modbad; }
+        #if (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) ||\
+                CYTHON_COMPILING_IN_GRAAL
+        {
+            PyObject *modules = PyImport_GetModuleDict();
+            if (unlikely(!modules))
+                goto modbad;
+            value = PyObject_GetItem(modules, full_name);
+        }
+        #else
+        value = PyImport_GetModule(full_name);
+        #endif
+    modbad:
+        Py_XDECREF(full_name);
+        Py_XDECREF(module_dot);
+        Py_XDECREF(module_name);
+    }
+    if (unlikely(!value)) {
+        PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
+    }
+    return value;
+}
+
+/* ListPack */
+static PyObject *__Pyx_PyList_Pack(Py_ssize_t n, ...) {
+    va_list va;
+    PyObject *l = PyList_New(n);
+    va_start(va, n);
+    if (unlikely(!l)) goto end;
+    for (Py_ssize_t i=0; i<n; i++) {
+        PyObject *arg = va_arg(va, PyObject*);
+        Py_INCREF(arg);
+        PyList_SET_ITEM(l, i, arg);
+    }
+end:
+    va_end(va);
+    return l;
+}
+
+/* pybytes_as_double */
+static double __Pyx__PyBytes_AsDouble_inf_nan(const char* start, Py_ssize_t length) {
+    // Detect "inf"/"nan" literals (with optional sign); return 0.0 when no such literal matches.
+    int matches = 1;
+    char sign = start[0];
+    int is_signed = (sign == '+') | (sign == '-');
+    start += is_signed;
+    length -= is_signed;
+    switch (start[0]) {
+        case 'i': case 'I':
+            if (unlikely(length != 3)) goto parse_failure;
+            matches &= (start[1] == 'n' || start[1] == 'N');
+            matches &= (start[2] == 'f' || start[2] == 'F');
+            if (unlikely(!matches)) goto parse_failure;
+            return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL;
+        case 'n': case 'N':
+            if (unlikely(length != 3)) goto parse_failure;
+            matches &= (start[1] == 'a' || start[1] == 'A');
+            matches &= (start[2] == 'n' || start[2] == 'N');
+            if (unlikely(!matches)) goto parse_failure;
+            return (sign == '-') ? -Py_NAN : Py_NAN;
+        default:
+            goto parse_failure;
+    }
+parse_failure:
+    return 0.0;
+}
+static const char* __Pyx__PyBytes_AsDouble_Copy(const char* start, char* buffer, Py_ssize_t length) {
+    // Copy the digits while dropping PEP-515 underscores; NULL means "defer to the slow path".
+    int last_was_punctuation = 1;
+    Py_ssize_t i;
+    for (i=0; i < length; i++) {
+        char chr = start[i];
+        int is_punctuation = (chr == '_') | (chr == '.') | (chr == 'e') | (chr == 'E');
+        *buffer = chr;
+        buffer += (chr != '_');
+        if (unlikely(last_was_punctuation & is_punctuation)) return NULL;
+        last_was_punctuation = is_punctuation;
+    }
+    if (unlikely(last_was_punctuation)) return NULL;
+    *buffer = '\0';
+    return buffer;
+}
+static CYTHON_INLINE int __Pyx__PyBytes_AsDouble_IsSpace(char ch) {
+    // see Py_ISSPACE() in CPython
+    return (ch == 0x20) | !((unsigned char)(ch - 0x9) > (0xd - 0x9));
+}
+CYTHON_UNUSED static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length) {
+    double value;
+    Py_ssize_t i, digits;
+    const char *last = start + length;
+    char *end;
+    while (__Pyx__PyBytes_AsDouble_IsSpace(*start))
+        start++;
+    while (start < last - 1 && __Pyx__PyBytes_AsDouble_IsSpace(last[-1]))
+        last--;
+    length = last - start;
+    if (unlikely(length <= 0)) goto fallback;
+    value = __Pyx__PyBytes_AsDouble_inf_nan(start, length);
+    if (unlikely(value == -1.0)) goto fallback;
+    if (value != 0.0) return value;
+    digits = 0;
+    for (i=0; i < length; digits += start[i++] != '_');
+    if (likely(digits == length)) {
+        value = PyOS_string_to_double(start, &end, NULL);
+    } else if (digits < 40) {
+        char number[40];
+        last = __Pyx__PyBytes_AsDouble_Copy(start, number, length);
+        if (unlikely(!last)) goto fallback;
+        value = PyOS_string_to_double(number, &end, NULL);
+    } else {
+        char *number = (char*) PyMem_Malloc((digits + 1) * sizeof(char));
+        if (unlikely(!number)) goto fallback;
+        last = __Pyx__PyBytes_AsDouble_Copy(start, number, length);
+        if (unlikely(!last)) {
+            PyMem_Free(number);
+            goto fallback;
+        }
+        value = PyOS_string_to_double(number, &end, NULL);
+        PyMem_Free(number);
+    }
+    if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) {
+        return value;
+    }
+fallback:
+    return __Pyx_SlowPyString_AsDouble(obj);
+}
+
+/* dict_setdefault (used by FetchCommonType) */
+static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value) {
+    PyObject* value;
+#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX >= 0x030C0000
+    PyObject *args[] = {d, key, default_value};
+    value = PyObject_VectorcallMethod(__pyx_mstate_global->__pyx_n_u_setdefault, args, 3 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
+#elif CYTHON_COMPILING_IN_LIMITED_API
+    value = PyObject_CallMethodObjArgs(d, __pyx_mstate_global->__pyx_n_u_setdefault, key, default_value, NULL);
+#elif PY_VERSION_HEX >= 0x030d0000
+    PyDict_SetDefaultRef(d, key, default_value, &value);
+#else
+    value = PyDict_SetDefault(d, key, default_value);
+    if (unlikely(!value)) return NULL;
+    Py_INCREF(value);
+#endif
+    return value;
+}
+
+/* AddModuleRef (used by FetchSharedCythonModule) */
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+    static PyObject *__Pyx_PyImport_AddModuleObjectRef(PyObject *name) {
+        PyObject *module_dict = PyImport_GetModuleDict();
+        PyObject *m;
+        if (PyMapping_GetOptionalItem(module_dict, name, &m) < 0) {
+            return NULL;
+        }
+        if (m != NULL && PyModule_Check(m)) {
+            return m;
+        }
+        Py_XDECREF(m);
+        m = PyModule_NewObject(name);
+        if (m == NULL)
+            return NULL;
+        if (PyDict_CheckExact(module_dict)) {
+            PyObject *new_m;
+            (void)PyDict_SetDefaultRef(module_dict,
name, m, &new_m); + Py_DECREF(m); + return new_m; + } else { + if (PyObject_SetItem(module_dict, name, m) != 0) { + Py_DECREF(m); + return NULL; + } + return m; + } + } + static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) { + PyObject *py_name = PyUnicode_FromString(name); + if (!py_name) return NULL; + PyObject *module = __Pyx_PyImport_AddModuleObjectRef(py_name); + Py_DECREF(py_name); + return module; + } +#elif __PYX_LIMITED_VERSION_HEX >= 0x030d0000 + #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name) +#else + static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) { + PyObject *module = PyImport_AddModule(name); + Py_XINCREF(module); + return module; + } +#endif + +/* FetchSharedCythonModule (used by FetchCommonType) */ +static PyObject *__Pyx_FetchSharedCythonABIModule(void) { + return __Pyx_PyImport_AddModuleRef(__PYX_ABI_MODULE_NAME); +} + +/* FetchCommonType (used by CommonTypesMetaclass) */ +#if __PYX_LIMITED_VERSION_HEX < 0x030C0000 +static PyObject* __Pyx_PyType_FromMetaclass(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *result = __Pyx_PyType_FromModuleAndSpec(module, spec, bases); + if (result && metaclass) { + PyObject *old_tp = (PyObject*)Py_TYPE(result); + Py_INCREF((PyObject*)metaclass); +#if __PYX_LIMITED_VERSION_HEX >= 0x03090000 + Py_SET_TYPE(result, metaclass); +#else + result->ob_type = metaclass; +#endif + Py_DECREF(old_tp); + } + return result; +} +#else +#define __Pyx_PyType_FromMetaclass(me, mo, s, b) PyType_FromMetaclass(me, mo, s, b) +#endif +static int __Pyx_VerifyCachedType(PyObject *cached_type, + const char *name, + Py_ssize_t expected_basicsize) { + Py_ssize_t basicsize; + if (!PyType_Check(cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", name); + return -1; + } + if (expected_basicsize == 0) { + return 0; // size is inherited, nothing useful to check + } +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); + if (unlikely(!py_basicsize)) return -1; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = NULL; + if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) return -1; +#else + basicsize = ((PyTypeObject*) cached_type)->tp_basicsize; +#endif + if (basicsize != expected_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + name); + return -1; + } + return 0; +} +static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyTypeObject *metaclass, PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *abi_module = NULL, *cached_type = NULL, *abi_module_dict, *new_cached_type, *py_object_name; + int get_item_ref_result; + const char* object_name = strrchr(spec->name, '.'); + object_name = object_name ? 
object_name+1 : spec->name; + py_object_name = PyUnicode_FromString(object_name); + if (!py_object_name) return NULL; + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) goto done; + abi_module_dict = PyModule_GetDict(abi_module); + if (!abi_module_dict) goto done; + get_item_ref_result = __Pyx_PyDict_GetItemRef(abi_module_dict, py_object_name, &cached_type); + if (get_item_ref_result == 1) { + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } else if (unlikely(get_item_ref_result == -1)) { + goto bad; + } + cached_type = __Pyx_PyType_FromMetaclass( + metaclass, + CYTHON_USE_MODULE_STATE ? module : abi_module, + spec, bases); + if (unlikely(!cached_type)) goto bad; + if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; + new_cached_type = __Pyx_PyDict_SetDefault(abi_module_dict, py_object_name, cached_type); + if (unlikely(new_cached_type != cached_type)) { + if (unlikely(!new_cached_type)) goto bad; + Py_DECREF(cached_type); + cached_type = new_cached_type; + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } else { + Py_DECREF(new_cached_type); + } +done: + Py_XDECREF(abi_module); + Py_DECREF(py_object_name); + assert(cached_type == NULL || PyType_Check(cached_type)); + return (PyTypeObject *) cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} + +/* CommonTypesMetaclass (used by CythonFunctionShared) */ +static PyObject* __pyx_CommonTypesMetaclass_get_module(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED void* context) { + return PyUnicode_FromString(__PYX_ABI_MODULE_NAME); +} +#if __PYX_LIMITED_VERSION_HEX < 0x030A0000 +static PyObject* __pyx_CommonTypesMetaclass_call(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwds) { + PyErr_SetString(PyExc_TypeError, "Cannot instantiate Cython internal types"); + return NULL; +} +static int __pyx_CommonTypesMetaclass_setattr(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *attr, CYTHON_UNUSED PyObject *value) { + PyErr_SetString(PyExc_TypeError, "Cython internal types are immutable"); + return -1; +} +#endif +static PyGetSetDef __pyx_CommonTypesMetaclass_getset[] = { + {"__module__", __pyx_CommonTypesMetaclass_get_module, NULL, NULL, NULL}, + {0, 0, 0, 0, 0} +}; +static PyType_Slot __pyx_CommonTypesMetaclass_slots[] = { + {Py_tp_getset, (void *)__pyx_CommonTypesMetaclass_getset}, + #if __PYX_LIMITED_VERSION_HEX < 0x030A0000 + {Py_tp_call, (void*)__pyx_CommonTypesMetaclass_call}, + {Py_tp_new, (void*)__pyx_CommonTypesMetaclass_call}, + {Py_tp_setattro, (void*)__pyx_CommonTypesMetaclass_setattr}, + #endif + {0, 0} +}; +static PyType_Spec __pyx_CommonTypesMetaclass_spec = { + __PYX_TYPE_MODULE_PREFIX "_common_types_metatype", + 0, + 0, + Py_TPFLAGS_IMMUTABLETYPE | + Py_TPFLAGS_DISALLOW_INSTANTIATION | + Py_TPFLAGS_DEFAULT, + __pyx_CommonTypesMetaclass_slots +}; +static int __pyx_CommonTypesMetaclass_init(PyObject *module) { + __pyx_mstatetype *mstate = __Pyx_PyModule_GetState(module); + PyObject *bases = PyTuple_Pack(1, &PyType_Type); + if (unlikely(!bases)) { + return -1; + } + mstate->__pyx_CommonTypesMetaclassType = __Pyx_FetchCommonTypeFromSpec(NULL, module, &__pyx_CommonTypesMetaclass_spec, bases); + Py_DECREF(bases); + if (unlikely(mstate->__pyx_CommonTypesMetaclassType == NULL)) { + return -1; + } + return 0; +} + +/* CallTypeTraverse (used by CythonFunctionShared) */ +#if 
!CYTHON_USE_TYPE_SPECS || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x03090000) +#else +static int __Pyx_call_type_traverse(PyObject *o, int always_call, visitproc visit, void *arg) { + #if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x03090000 + if (__Pyx_get_runtime_version() < 0x03090000) return 0; + #endif + if (!always_call) { + PyTypeObject *base = __Pyx_PyObject_GetSlot(o, tp_base, PyTypeObject*); + unsigned long flags = PyType_GetFlags(base); + if (flags & Py_TPFLAGS_HEAPTYPE) { + return 0; + } + } + Py_VISIT((PyObject*)Py_TYPE(o)); + return 0; +} +#endif + +/* PyMethodNew (used by CythonFunctionShared) */ +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + PyObject *result; + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + #if __PYX_LIMITED_VERSION_HEX >= 0x030C0000 + { + PyObject *args[] = {func, self}; + result = PyObject_Vectorcall(__pyx_mstate_global->__Pyx_CachedMethodType, args, 2, NULL); + } + #else + result = PyObject_CallFunctionObjArgs(__pyx_mstate_global->__Pyx_CachedMethodType, func, self, NULL); + #endif + return result; +} +#else +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + return PyMethod_New(func, self); +} +#endif + +/* PyVectorcallFastCallDict (used by CythonFunctionShared) */ +#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL +static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + PyObject *res = NULL; + PyObject *kwnames; + PyObject **newargs; + PyObject **kwvalues; + Py_ssize_t i; + #if CYTHON_AVOID_BORROWED_REFS + PyObject *pos; + #else + Py_ssize_t pos; + #endif + size_t j; + PyObject *key, *value; + unsigned long keys_are_strings; + #if !CYTHON_ASSUME_SAFE_SIZE + Py_ssize_t nkw = PyDict_Size(kw); + if (unlikely(nkw == -1)) return NULL; + #else + Py_ssize_t nkw = PyDict_GET_SIZE(kw); + #endif + newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); + if (unlikely(newargs == NULL)) { + PyErr_NoMemory(); + return NULL; + } + for (j = 0; j < nargs; j++) newargs[j] = args[j]; + kwnames = PyTuple_New(nkw); + if (unlikely(kwnames == NULL)) { + PyMem_Free(newargs); + return NULL; + } + kwvalues = newargs + nargs; + pos = 0; + i = 0; + keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; + while (__Pyx_PyDict_NextRef(kw, &pos, &key, &value)) { + keys_are_strings &= + #if CYTHON_COMPILING_IN_LIMITED_API + PyType_GetFlags(Py_TYPE(key)); + #else + Py_TYPE(key)->tp_flags; + #endif + #if !CYTHON_ASSUME_SAFE_MACROS + if (unlikely(PyTuple_SetItem(kwnames, i, key) < 0)) goto cleanup; + #else + PyTuple_SET_ITEM(kwnames, i, key); + #endif + kwvalues[i] = value; + i++; + } + if (unlikely(!keys_are_strings)) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + goto cleanup; + } + res = vc(func, newargs, nargs, kwnames); +cleanup: + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(pos); + #endif + Py_DECREF(kwnames); + for (i = 0; i < nkw; i++) + Py_DECREF(kwvalues[i]); + PyMem_Free(newargs); + return res; +} +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + Py_ssize_t kw_size = + likely(kw == NULL) ? 
+ 0 : +#if !CYTHON_ASSUME_SAFE_SIZE + PyDict_Size(kw); +#else + PyDict_GET_SIZE(kw); +#endif + if (kw_size == 0) { + return vc(func, args, nargs, NULL); + } +#if !CYTHON_ASSUME_SAFE_SIZE + else if (unlikely(kw_size == -1)) { + return NULL; + } +#endif + return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); +} +#endif + +/* CythonFunctionShared (used by CythonFunction) */ +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunctionNoMethod(PyObject *func, void (*cfunc)(void)) { + if (__Pyx_CyFunction_Check(func)) { + return PyCFunction_GetFunction(((__pyx_CyFunctionObject*)func)->func) == (PyCFunction) cfunc; + } else if (PyCFunction_Check(func)) { + return PyCFunction_GetFunction(func) == (PyCFunction) cfunc; + } + return 0; +} +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void)) { + if ((PyObject*)Py_TYPE(func) == __pyx_mstate_global->__Pyx_CachedMethodType) { + int result; + PyObject *newFunc = PyObject_GetAttr(func, __pyx_mstate_global->__pyx_n_u_func); + if (unlikely(!newFunc)) { + PyErr_Clear(); // It's only an optimization, so don't throw an error + return 0; + } + result = __Pyx__IsSameCyOrCFunctionNoMethod(newFunc, cfunc); + Py_DECREF(newFunc); + return result; + } + return __Pyx__IsSameCyOrCFunctionNoMethod(func, cfunc); +} +#else +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void (*cfunc)(void)) { + if (PyMethod_Check(func)) { + func = PyMethod_GET_FUNCTION(func); + } + return __Pyx_CyOrPyCFunction_Check(func) && __Pyx_CyOrPyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc; +} +#endif +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + __Pyx_Py_XDECREF_SET( + __Pyx_CyFunction_GetClassObj(f), + ((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#else + __Pyx_Py_XDECREF_SET( + ((PyCMethodObject *) (f))->mm_class, + (PyTypeObject*)((classobj) ? 
__Pyx_NewRef(classobj) : NULL)); +#endif +} +static PyObject * +__Pyx_CyFunction_get_doc_locked(__pyx_CyFunctionObject *op) +{ + if (unlikely(op->func_doc == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_doc = PyObject_GetAttrString(op->func, "__doc__"); + if (unlikely(!op->func_doc)) return NULL; +#else + if (((PyCFunctionObject*)op)->m_ml->ml_doc) { + op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); + if (unlikely(op->func_doc == NULL)) + return NULL; + } else { + Py_INCREF(Py_None); + return Py_None; + } +#endif + } + Py_INCREF(op->func_doc); + return op->func_doc; +} +static PyObject * +__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) { + PyObject *result; + CYTHON_UNUSED_VAR(closure); + __Pyx_BEGIN_CRITICAL_SECTION(op); + result = __Pyx_CyFunction_get_doc_locked(op); + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static int +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (value == NULL) { + value = Py_None; + } + Py_INCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->func_doc, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_name_locked(__pyx_CyFunctionObject *op) +{ + if (unlikely(op->func_name == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_name = PyObject_GetAttrString(op->func, "__name__"); +#else + op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#endif + if (unlikely(op->func_name == NULL)) + return NULL; + } + Py_INCREF(op->func_name); + return op->func_name; +} +static PyObject * +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) +{ + PyObject *result = NULL; + CYTHON_UNUSED_VAR(context); + __Pyx_BEGIN_CRITICAL_SECTION(op); + result = __Pyx_CyFunction_get_name_locked(op); + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static int +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL || !PyUnicode_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->func_name, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + PyObject *result; + __Pyx_BEGIN_CRITICAL_SECTION(op); + Py_INCREF(op->func_qualname); + result = op->func_qualname; + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static int +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL || !PyUnicode_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->func_qualname, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 +static PyObject * +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_dict == NULL)) { + op->func_dict = PyDict_New(); + if (unlikely(op->func_dict == NULL)) + return NULL; + } + Py_INCREF(op->func_dict); + return op->func_dict; +} +#endif +static PyObject * 
+__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_globals); + return op->func_globals; +} +static PyObject * +__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(op); + CYTHON_UNUSED_VAR(context); + Py_INCREF(Py_None); + return Py_None; +} +static PyObject * +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) +{ + PyObject* result = (op->func_code) ? op->func_code : Py_None; + CYTHON_UNUSED_VAR(context); + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { + int result = 0; + PyObject *res = op->defaults_getter((PyObject *) op); + if (unlikely(!res)) + return -1; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + op->defaults_tuple = PyTuple_GET_ITEM(res, 0); + Py_INCREF(op->defaults_tuple); + op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); + Py_INCREF(op->defaults_kwdict); + #else + op->defaults_tuple = __Pyx_PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = __Pyx_PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif + Py_DECREF(res); + return result; +} +static int +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__defaults__ must be set to a tuple object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_defaults_locked(__pyx_CyFunctionObject *op) { + PyObject* result = op->defaults_tuple; + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_tuple; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static PyObject * +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = NULL; + CYTHON_UNUSED_VAR(context); + __Pyx_BEGIN_CRITICAL_SECTION(op); + result = __Pyx_CyFunction_get_defaults_locked(op); + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static int +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__kwdefaults__ must be set to a dict object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_kwdefaults_locked(__pyx_CyFunctionObject *op) { + PyObject* result = op->defaults_kwdict; + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_kwdict; + } else { + result = Py_None; + } + } + 
Py_INCREF(result); + return result; +} +static PyObject * +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result; + CYTHON_UNUSED_VAR(context); + __Pyx_BEGIN_CRITICAL_SECTION(op); + result = __Pyx_CyFunction_get_kwdefaults_locked(op); + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static int +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value || value == Py_None) { + value = NULL; + } else if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__annotations__ must be set to a dict object"); + return -1; + } + Py_XINCREF(value); + __Pyx_BEGIN_CRITICAL_SECTION(op); + __Pyx_Py_XDECREF_SET(op->func_annotations, value); + __Pyx_END_CRITICAL_SECTION(); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_annotations_locked(__pyx_CyFunctionObject *op) { + PyObject* result = op->func_annotations; + if (unlikely(!result)) { + result = PyDict_New(); + if (unlikely(!result)) return NULL; + op->func_annotations = result; + } + Py_INCREF(result); + return result; +} +static PyObject * +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) { + PyObject *result; + CYTHON_UNUSED_VAR(context); + __Pyx_BEGIN_CRITICAL_SECTION(op); + result = __Pyx_CyFunction_get_annotations_locked(op); + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static PyObject * +__Pyx_CyFunction_get_is_coroutine_value(__pyx_CyFunctionObject *op) { + int is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; + if (is_coroutine) { + PyObject *is_coroutine_value, *module, *fromlist, *marker = __pyx_mstate_global->__pyx_n_u_is_coroutine; + fromlist = PyList_New(1); + if (unlikely(!fromlist)) return NULL; + Py_INCREF(marker); +#if CYTHON_ASSUME_SAFE_MACROS + PyList_SET_ITEM(fromlist, 0, marker); +#else + if (unlikely(PyList_SetItem(fromlist, 0, marker) < 0)) { + Py_DECREF(marker); + Py_DECREF(fromlist); + return NULL; + } +#endif + module = PyImport_ImportModuleLevelObject(__pyx_mstate_global->__pyx_n_u_asyncio_coroutines, NULL, NULL, fromlist, 0); + Py_DECREF(fromlist); + if (unlikely(!module)) goto ignore; + is_coroutine_value = __Pyx_PyObject_GetAttrStr(module, marker); + Py_DECREF(module); + if (likely(is_coroutine_value)) { + return is_coroutine_value; + } +ignore: + PyErr_Clear(); + } + return __Pyx_PyBool_FromLong(is_coroutine); +} +static PyObject * +__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { + PyObject *result; + CYTHON_UNUSED_VAR(context); + if (op->func_is_coroutine) { + return __Pyx_NewRef(op->func_is_coroutine); + } + result = __Pyx_CyFunction_get_is_coroutine_value(op); + if (unlikely(!result)) + return NULL; + __Pyx_BEGIN_CRITICAL_SECTION(op); + if (op->func_is_coroutine) { + Py_DECREF(result); + result = __Pyx_NewRef(op->func_is_coroutine); + } else { + op->func_is_coroutine = __Pyx_NewRef(result); + } + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static void __Pyx_CyFunction_raise_argument_count_error(__pyx_CyFunctionObject *func, const char* message, Py_ssize_t size) { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_name = __Pyx_CyFunction_get_name(func, NULL); + if (!py_name) return; + PyErr_Format(PyExc_TypeError, + "%.200S() %s (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, message, size); + Py_DECREF(py_name); +#else + const char* name = ((PyCFunctionObject*)func)->m_ml->ml_name; + PyErr_Format(PyExc_TypeError, + "%.200s() %s (%" CYTHON_FORMAT_SSIZE_T "d given)", + name, message, size); 
+#endif +} +static void __Pyx_CyFunction_raise_type_error(__pyx_CyFunctionObject *func, const char* message) { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_name = __Pyx_CyFunction_get_name(func, NULL); + if (!py_name) return; + PyErr_Format(PyExc_TypeError, + "%.200S() %s", + py_name, message); + Py_DECREF(py_name); +#else + const char* name = ((PyCFunctionObject*)func)->m_ml->ml_name; + PyErr_Format(PyExc_TypeError, + "%.200s() %s", + name, message); +#endif +} +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject * +__Pyx_CyFunction_get_module(__pyx_CyFunctionObject *op, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_GetAttrString(op->func, "__module__"); +} +static int +__Pyx_CyFunction_set_module(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_SetAttrString(op->func, "__module__", value); +} +#endif +static PyGetSetDef __pyx_CyFunction_getsets[] = { + {"func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {"__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {"func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {"__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {"__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000 + {"func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)PyObject_GenericSetDict, 0, 0}, + {"__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)PyObject_GenericSetDict, 0, 0}, +#else + {"func_dict", (getter)PyObject_GenericGetDict, (setter)PyObject_GenericSetDict, 0, 0}, + {"__dict__", (getter)PyObject_GenericGetDict, (setter)PyObject_GenericSetDict, 0, 0}, +#endif + {"func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {"__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {"func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {"__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {"func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {"__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {"func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {"__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {"__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, + {"__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, + {"_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, +#if CYTHON_COMPILING_IN_LIMITED_API + {"__module__", (getter)__Pyx_CyFunction_get_module, (setter)__Pyx_CyFunction_set_module, 0, 0}, +#endif + {0, 0, 0, 0, 0} +}; +static PyMemberDef __pyx_CyFunction_members[] = { +#if !CYTHON_COMPILING_IN_LIMITED_API + {"__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, +#endif +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + {"__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, +#endif +#if CYTHON_METH_FASTCALL +#if CYTHON_COMPILING_IN_LIMITED_API + {"__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, +#else + {"__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, +#endif +#if 
CYTHON_COMPILING_IN_LIMITED_API + {"__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, +#else + {"__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, +#endif +#endif + {0, 0, 0, 0, 0} +}; +static PyObject * +__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) +{ + PyObject *result = NULL; + CYTHON_UNUSED_VAR(args); + __Pyx_BEGIN_CRITICAL_SECTION(m); + Py_INCREF(m->func_qualname); + result = m->func_qualname; + __Pyx_END_CRITICAL_SECTION(); + return result; +} +static PyMethodDef __pyx_CyFunction_methods[] = { + {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, + {0, 0, 0, 0} +}; +#if CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) +#else +#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) +#endif +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { +#if !CYTHON_COMPILING_IN_LIMITED_API + PyCFunctionObject *cf = (PyCFunctionObject*) op; +#endif + if (unlikely(op == NULL)) + return NULL; +#if CYTHON_COMPILING_IN_LIMITED_API + op->func = PyCFunction_NewEx(ml, (PyObject*)op, module); + if (unlikely(!op->func)) return NULL; +#endif + op->flags = flags; + __Pyx_CyFunction_weakreflist(op) = NULL; +#if !CYTHON_COMPILING_IN_LIMITED_API + cf->m_ml = ml; + cf->m_self = (PyObject *) op; +#endif + Py_XINCREF(closure); + op->func_closure = closure; +#if !CYTHON_COMPILING_IN_LIMITED_API + Py_XINCREF(module); + cf->m_module = module; +#endif +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + op->func_dict = NULL; +#endif + op->func_name = NULL; + Py_INCREF(qualname); + op->func_qualname = qualname; + op->func_doc = NULL; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + op->func_classobj = NULL; +#else + ((PyCMethodObject*)op)->mm_class = NULL; +#endif + op->func_globals = globals; + Py_INCREF(op->func_globals); + Py_XINCREF(code); + op->func_code = code; + op->defaults = NULL; + op->defaults_tuple = NULL; + op->defaults_kwdict = NULL; + op->defaults_getter = NULL; + op->func_annotations = NULL; + op->func_is_coroutine = NULL; +#if CYTHON_METH_FASTCALL + switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { + case METH_NOARGS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; + break; + case METH_O: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; + break; + case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; + break; + case METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; + break; + case METH_VARARGS | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = NULL; + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + Py_DECREF(op); + return NULL; + } +#endif + return (PyObject *) op; +} +static int +__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) +{ + Py_CLEAR(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_CLEAR(m->func); +#else + Py_CLEAR(((PyCFunctionObject*)m)->m_module); +#endif +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + Py_CLEAR(m->func_dict); +#elif PY_VERSION_HEX 
< 0x030d0000
+    _PyObject_ClearManagedDict((PyObject*)m);
+#else
+    PyObject_ClearManagedDict((PyObject*)m);
+#endif
+    Py_CLEAR(m->func_name);
+    Py_CLEAR(m->func_qualname);
+    Py_CLEAR(m->func_doc);
+    Py_CLEAR(m->func_globals);
+    Py_CLEAR(m->func_code);
+#if !CYTHON_COMPILING_IN_LIMITED_API
+#if PY_VERSION_HEX < 0x030900B1
+    Py_CLEAR(__Pyx_CyFunction_GetClassObj(m));
+#else
+    {
+        PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class;
+        ((PyCMethodObject *) (m))->mm_class = NULL;
+        Py_XDECREF(cls);
+    }
+#endif
+#endif
+    Py_CLEAR(m->defaults_tuple);
+    Py_CLEAR(m->defaults_kwdict);
+    Py_CLEAR(m->func_annotations);
+    Py_CLEAR(m->func_is_coroutine);
+    Py_CLEAR(m->defaults);
+    return 0;
+}
+static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
+{
+    if (__Pyx_CyFunction_weakreflist(m) != NULL)
+        PyObject_ClearWeakRefs((PyObject *) m);
+    __Pyx_CyFunction_clear(m);
+    __Pyx_PyHeapTypeObject_GC_Del(m);
+}
+static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
+{
+    PyObject_GC_UnTrack(m);
+    __Pyx__CyFunction_dealloc(m);
+}
+static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
+{
+    {
+        int e = __Pyx_call_type_traverse((PyObject*)m, 1, visit, arg);
+        if (e) return e;
+    }
+    Py_VISIT(m->func_closure);
+#if CYTHON_COMPILING_IN_LIMITED_API
+    Py_VISIT(m->func);
+#else
+    Py_VISIT(((PyCFunctionObject*)m)->m_module);
+#endif
+#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API
+    Py_VISIT(m->func_dict);
+#else
+    {
+        int e =
+#if PY_VERSION_HEX < 0x030d0000
+            _PyObject_VisitManagedDict
+#else
+            PyObject_VisitManagedDict
+#endif
+                ((PyObject*)m, visit, arg);
+        if (e != 0) return e;
+    }
+#endif
+    __Pyx_VISIT_CONST(m->func_name);
+    __Pyx_VISIT_CONST(m->func_qualname);
+    Py_VISIT(m->func_doc);
+    Py_VISIT(m->func_globals);
+    __Pyx_VISIT_CONST(m->func_code);
+#if !CYTHON_COMPILING_IN_LIMITED_API
+    Py_VISIT(__Pyx_CyFunction_GetClassObj(m));
+#endif
+    Py_VISIT(m->defaults_tuple);
+    Py_VISIT(m->defaults_kwdict);
+    Py_VISIT(m->func_is_coroutine);
+    Py_VISIT(m->defaults);
+    return 0;
+}
+static PyObject*
+__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
+{
+    PyObject *repr;
+    __Pyx_BEGIN_CRITICAL_SECTION(op);
+    repr = PyUnicode_FromFormat("<cyfunction %U at %p>",
+                                op->func_qualname, (void *)op);
+    __Pyx_END_CRITICAL_SECTION();
+    return repr;
+}
+static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
+#if CYTHON_COMPILING_IN_LIMITED_API
+    PyObject *f = ((__pyx_CyFunctionObject*)func)->func;
+    PyCFunction meth;
+    int flags;
+    meth = PyCFunction_GetFunction(f);
+    if (unlikely(!meth)) return NULL;
+    flags = PyCFunction_GetFlags(f);
+    if (unlikely(flags < 0)) return NULL;
+#else
+    PyCFunctionObject* f = (PyCFunctionObject*)func;
+    PyCFunction meth = f->m_ml->ml_meth;
+    int flags = f->m_ml->ml_flags;
+#endif
+    Py_ssize_t size;
+    switch (flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
+    case METH_VARARGS:
+        if (likely(kw == NULL || PyDict_Size(kw) == 0))
+            return (*meth)(self, arg);
+        break;
+    case METH_VARARGS | METH_KEYWORDS:
+        return (*(PyCFunctionWithKeywords)(void(*)(void))meth)(self, arg, kw);
+    case METH_NOARGS:
+        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
+#if CYTHON_ASSUME_SAFE_SIZE
+            size = PyTuple_GET_SIZE(arg);
+#else
+            size = PyTuple_Size(arg);
+            if (unlikely(size < 0)) return NULL;
+#endif
+            if (likely(size == 0))
+                return (*meth)(self, NULL);
+            __Pyx_CyFunction_raise_argument_count_error(
+                (__pyx_CyFunctionObject*)func,
+                "takes no arguments", size);
+            return NULL;
+        }
+
break; + case METH_O: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { +#if CYTHON_ASSUME_SAFE_SIZE + size = PyTuple_GET_SIZE(arg); +#else + size = PyTuple_Size(arg); + if (unlikely(size < 0)) return NULL; +#endif + if (likely(size == 1)) { + PyObject *result, *arg0; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + arg0 = PyTuple_GET_ITEM(arg, 0); + #else + arg0 = __Pyx_PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; + #endif + result = (*meth)(self, arg0); + #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(arg0); + #endif + return result; + } + __Pyx_CyFunction_raise_argument_count_error( + (__pyx_CyFunctionObject*)func, + "takes exactly one argument", size); + return NULL; + } + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + return NULL; + } + __Pyx_CyFunction_raise_type_error( + (__pyx_CyFunctionObject*)func, "takes no keyword arguments"); + return NULL; +} +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *self, *result; +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)func)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)func)->m_self; +#endif + result = __Pyx_CyFunction_CallMethod(func, self, arg, kw); + return result; +} +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; +#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL + __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); + if (vc) { +#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE + return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); +#else + (void) &__Pyx_PyVectorcall_FastCallDict; + return PyVectorcall_Call(func, args, kw); +#endif + } +#endif + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; +#if CYTHON_ASSUME_SAFE_SIZE + argc = PyTuple_GET_SIZE(args); +#else + argc = PyTuple_Size(args); + if (unlikely(argc < 0)) return NULL; +#endif + new_args = PyTuple_GetSlice(args, 1, argc); + if (unlikely(!new_args)) + return NULL; + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); + PyErr_Format(PyExc_TypeError, + "unbound method %.200S() needs an argument", + cyfunc->func_qualname); + return NULL; + } + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; +} +#if CYTHON_METH_FASTCALL && CYTHON_VECTORCALL +static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) +{ + int ret = 0; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + if (unlikely(nargs < 1)) { + __Pyx_CyFunction_raise_type_error( + cyfunc, "needs an argument"); + return -1; + } + ret = 1; + } + if (unlikely(kwnames) && unlikely(__Pyx_PyTuple_GET_SIZE(kwnames))) { + __Pyx_CyFunction_raise_type_error( + cyfunc, "takes no keyword arguments"); + return -1; + } + return ret; +} +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject 
*cyfunc = (__pyx_CyFunctionObject *)func; + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); + PyObject *self; +#if CYTHON_COMPILING_IN_LIMITED_API + PyCFunction meth = PyCFunction_GetFunction(cyfunc->func); + if (unlikely(!meth)) return NULL; +#else + PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth; +#endif + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)cyfunc)->m_self; +#endif + break; + default: + return NULL; + } + if (unlikely(nargs != 0)) { + __Pyx_CyFunction_raise_argument_count_error( + cyfunc, "takes no arguments", nargs); + return NULL; + } + return meth(self, NULL); +} +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); + PyObject *self; +#if CYTHON_COMPILING_IN_LIMITED_API + PyCFunction meth = PyCFunction_GetFunction(cyfunc->func); + if (unlikely(!meth)) return NULL; +#else + PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth; +#endif + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)cyfunc)->m_self; +#endif + break; + default: + return NULL; + } + if (unlikely(nargs != 1)) { + __Pyx_CyFunction_raise_argument_count_error( + cyfunc, "takes exactly one argument", nargs); + return NULL; + } + return meth(self, args[0]); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); + PyObject *self; +#if CYTHON_COMPILING_IN_LIMITED_API + PyCFunction meth = PyCFunction_GetFunction(cyfunc->func); + if (unlikely(!meth)) return NULL; +#else + PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth; +#endif + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)cyfunc)->m_self; +#endif + break; + default: + return NULL; + } + return ((__Pyx_PyCFunctionFastWithKeywords)(void(*)(void))meth)(self, args, nargs, kwnames); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); + PyObject *self; +#if CYTHON_COMPILING_IN_LIMITED_API + PyCFunction meth = PyCFunction_GetFunction(cyfunc->func); + if (unlikely(!meth)) return NULL; +#else + PyCFunction meth = ((PyCFunctionObject*)cyfunc)->m_ml->ml_meth; +#endif + switch 
(__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)cyfunc)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)cyfunc)->m_self; +#endif + break; + default: + return NULL; + } + #if PY_VERSION_HEX < 0x030e00A6 + size_t nargs_value = (size_t) nargs; + #else + Py_ssize_t nargs_value = nargs; + #endif + return ((__Pyx_PyCMethod)(void(*)(void))meth)(self, cls, args, nargs_value, kwnames); +} +#endif +static PyType_Slot __pyx_CyFunctionType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, + {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, + {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, + {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, + {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, + {Py_tp_methods, (void *)__pyx_CyFunction_methods}, + {Py_tp_members, (void *)__pyx_CyFunction_members}, + {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, + {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, + {0, 0}, +}; +static PyType_Spec __pyx_CyFunctionType_spec = { + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if CYTHON_METH_FASTCALL +#if defined(Py_TPFLAGS_HAVE_VECTORCALL) + Py_TPFLAGS_HAVE_VECTORCALL | +#elif defined(_Py_TPFLAGS_HAVE_VECTORCALL) + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif +#endif // CYTHON_METH_FASTCALL +#if PY_VERSION_HEX >= 0x030C0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_TPFLAGS_MANAGED_DICT | +#endif + Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION | + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, + __pyx_CyFunctionType_slots +}; +static int __pyx_CyFunction_init(PyObject *module) { + __pyx_mstatetype *mstate = __Pyx_PyModule_GetState(module); + mstate->__pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec( + mstate->__pyx_CommonTypesMetaclassType, module, &__pyx_CyFunctionType_spec, NULL); + if (unlikely(mstate->__pyx_CyFunctionType == NULL)) { + return -1; + } + return 0; +} +static CYTHON_INLINE PyObject *__Pyx_CyFunction_InitDefaults(PyObject *func, PyTypeObject *defaults_type) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults = PyObject_CallObject((PyObject*)defaults_type, NULL); // _PyObject_New(defaults_type); + if (unlikely(!m->defaults)) + return NULL; + return m->defaults; +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_tuple = tuple; + Py_INCREF(tuple); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_kwdict = dict; + Py_INCREF(dict); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->func_annotations = dict; + Py_INCREF(dict); +} + +/* CythonFunction */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + PyObject *op = __Pyx_CyFunction_Init( + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_mstate_global->__pyx_CyFunctionType), + ml, flags, qualname, closure, module, 
globals, code
+    );
+    if (likely(op)) {
+        PyObject_GC_Track(op);
+    }
+    return op;
+}
+
+/* CLineInTraceback (used by AddTraceback) */
+#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME
+#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
+#define __Pyx_PyProbablyModule_GetDict(o) __Pyx_XNewRef(PyModule_GetDict(o))
+#elif !CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+#define __Pyx_PyProbablyModule_GetDict(o) PyObject_GenericGetDict(o, NULL)
+#else
+PyObject* __Pyx_PyProbablyModule_GetDict(PyObject *o) {
+    PyObject **dict_ptr = _PyObject_GetDictPtr(o);
+    return dict_ptr ? __Pyx_XNewRef(*dict_ptr) : NULL;
+}
+#endif
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
+    PyObject *use_cline = NULL;
+    PyObject *ptype, *pvalue, *ptraceback;
+    PyObject *cython_runtime_dict;
+    CYTHON_MAYBE_UNUSED_VAR(tstate);
+    if (unlikely(!__pyx_mstate_global->__pyx_cython_runtime)) {
+        return c_line;
+    }
+    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+    cython_runtime_dict = __Pyx_PyProbablyModule_GetDict(__pyx_mstate_global->__pyx_cython_runtime);
+    if (likely(cython_runtime_dict)) {
+        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+            use_cline, cython_runtime_dict,
+            __Pyx_PyDict_SetDefault(cython_runtime_dict, __pyx_mstate_global->__pyx_n_u_cline_in_traceback, Py_False))
+    }
+    if (use_cline == NULL || use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+        c_line = 0;
+    }
+    Py_XDECREF(use_cline);
+    Py_XDECREF(cython_runtime_dict);
+    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+    return c_line;
+}
+#endif
+
+/* CodeObjectCache (used by AddTraceback) */
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = start + (end - start) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+            start = mid + 1;
+        } else {
+            return mid;
+        }
+    }
+    if (code_line <= entries[mid].code_line) {
+        return mid;
+    } else {
+        return mid + 1;
+    }
+}
+static __Pyx_CachedCodeObjectType *__pyx__find_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line) {
+    __Pyx_CachedCodeObjectType* code_object;
+    int pos;
+    if (unlikely(!code_line) || unlikely(!code_cache->entries)) {
+        return NULL;
+    }
+    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
+    if (unlikely(pos >= code_cache->count) || unlikely(code_cache->entries[pos].code_line != code_line)) {
+        return NULL;
+    }
+    code_object = code_cache->entries[pos].code_object;
+    Py_INCREF(code_object);
+    return code_object;
+}
+static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line) {
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
+    (void)__pyx__find_code_object;
+    return NULL; // Most implementations should have atomics. But otherwise, don't make it thread-safe, just miss.
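+/* Reader side of the cache lock: on free-threaded CPython builds the #else
+   branch below atomically increments accessor_count before searching the
+   sorted entry array, and backs off if the previous value was negative,
+   i.e. while the writer in __pyx_insert_code_object holds the counter at
+   INT_MIN. */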
+#else
+    struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache;
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+    __pyx_nonatomic_int_type old_count = __pyx_atomic_incr_acq_rel(&code_cache->accessor_count);
+    if (old_count < 0) {
+        __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
+        return NULL;
+    }
+#endif
+    __Pyx_CachedCodeObjectType *result = __pyx__find_code_object(code_cache, code_line);
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
+    __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
+#endif
+    return result;
+#endif
+}
+static void __pyx__insert_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line, __Pyx_CachedCodeObjectType* code_object)
+{
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = code_cache->entries;
+    if (unlikely(!code_line)) {
+        return;
+    }
+    if (unlikely(!entries)) {
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            code_cache->entries = entries;
+            code_cache->max_count = 64;
+            code_cache->count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
+    if ((pos < code_cache->count) && unlikely(code_cache->entries[pos].code_line == code_line)) {
+        __Pyx_CachedCodeObjectType* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_INCREF(code_object);
+        Py_DECREF(tmp);
+        return;
+    }
+    if (code_cache->count == code_cache->max_count) {
+        int new_max = code_cache->max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            code_cache->entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;
+        }
+        code_cache->entries = entries;
+        code_cache->max_count = new_max;
+    }
+    for (i=code_cache->count; i>pos; i--) {
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    code_cache->count++;
+    Py_INCREF(code_object);
+}
+static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object) {
+#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
+    (void)__pyx__insert_code_object;
+    return; // Most implementations should have atomics. But otherwise, don't make it thread-safe, just fail.
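+/* Writer side of the cache lock: the #else branch below only performs the
+   insert when it can CAS accessor_count from 0 to INT_MIN, i.e. when no
+   reader is active; otherwise the insert is dropped, which merely costs a
+   later cache miss rather than risking a data race. */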
+#else + struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache; +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + __pyx_nonatomic_int_type expected = 0; + if (!__pyx_atomic_int_cmp_exchange(&code_cache->accessor_count, &expected, INT_MIN)) { + return; + } +#endif + __pyx__insert_code_object(code_cache, code_line, code_object); +#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING + __pyx_atomic_sub(&code_cache->accessor_count, INT_MIN); +#endif +#endif +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API && !defined(PYPY_VERSION) + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict, + PyObject *firstlineno, PyObject *name) { + PyObject *replace = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL; + replace = PyObject_GetAttrString(code, "replace"); + if (likely(replace)) { + PyObject *result = PyObject_Call(replace, __pyx_mstate_global->__pyx_empty_tuple, scratch_dict); + Py_DECREF(replace); + return result; + } + PyErr_Clear(); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL; + PyObject *replace = NULL, *getframe = NULL, *frame = NULL; + PyObject *exc_type, *exc_value, *exc_traceback; + int success = 0; + if (c_line) { + c_line = __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); + } + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + code_object = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!code_object) { + code_object = Py_CompileString("_getframe()", filename, Py_eval_input); + if (unlikely(!code_object)) goto bad; + py_py_line = PyLong_FromLong(py_line); + if (unlikely(!py_py_line)) goto bad; + if (c_line) { + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + } else { + py_funcname = PyUnicode_FromString(funcname); + } + if (unlikely(!py_funcname)) goto bad; + dict = PyDict_New(); + if (unlikely(!dict)) goto bad; + { + PyObject *old_code_object = code_object; + code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, dict, py_py_line, py_funcname); + Py_DECREF(old_code_object); + } + if (unlikely(!code_object)) goto bad; + __pyx_insert_code_object(c_line ? 
-c_line : py_line, code_object); + } else { + dict = PyDict_New(); + } + getframe = PySys_GetObject("_getframe"); + if (unlikely(!getframe)) goto bad; + if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad; + frame = PyEval_EvalCode(code_object, dict, dict); + if (unlikely(!frame) || frame == Py_None) goto bad; + success = 1; + bad: + PyErr_Restore(exc_type, exc_value, exc_traceback); + Py_XDECREF(code_object); + Py_XDECREF(py_py_line); + Py_XDECREF(py_funcname); + Py_XDECREF(dict); + Py_XDECREF(replace); + if (success) { + PyTraceBack_Here( + (struct _frame*)frame); + } + Py_XDECREF(frame); +} +#else +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = NULL; + PyObject *py_funcname = NULL; + if (c_line) { + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + funcname = PyUnicode_AsUTF8(py_funcname); + if (!funcname) goto bad; + } + py_code = PyCode_NewEmpty(filename, funcname, py_line); + Py_XDECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject *ptype, *pvalue, *ptraceback; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) { + /* If the code object creation fails, then we should clear the + fetched exception references and propagate the new exception */ + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + goto bad; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_mstate_global->__pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} +#endif + +/* Declarations */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = (double)(1.0) / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = (double)(1.0) / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + 
__pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if ((b.imag == 0) && (a.real >= 0)) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* FromPy */ +static __pyx_t_double_complex __Pyx_PyComplex_As___pyx_t_double_complex(PyObject* o) { +#if CYTHON_COMPILING_IN_LIMITED_API + double real=-1.0, imag=-1.0; + real = PyComplex_RealAsDouble(o); + if (unlikely(real == -1.0 && PyErr_Occurred())) goto end; + imag = PyComplex_ImagAsDouble(o); + end: + return __pyx_t_double_complex_from_parts( + (double)real, (double)imag + ); +#else + Py_complex cval; +#if !CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_GRAAL + if (PyComplex_CheckExact(o)) + cval = ((PyComplexObject *)o)->cval; + else +#endif + cval = PyComplex_AsCComplex(o); + return __pyx_t_double_complex_from_parts( + (double)cval.real, + (double)cval.imag); +#endif +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (unlikely(!PyLong_Check(x))) { + int val; + PyObject *tmp = __Pyx_PyNumber_Long(x); + if (!tmp) return (int) -1; + val = 
__Pyx_PyLong_As_int(tmp); + Py_DECREF(tmp); + return val; + } + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(int) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) + } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 
-3: + if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(int) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) + } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) + } + } + { + int val; + int ret = -1; +#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API + Py_ssize_t bytes_copied = PyLong_AsNativeBytes( + x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0)); + if (unlikely(bytes_copied == -1)) { + } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) { + goto raise_overflow; + } else { + ret = 0; + } +#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)x, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *v; + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + int chunk_size = (sizeof(long) < 8) ? 
30 : 62; + if (likely(PyLong_CheckExact(x))) { + v = __Pyx_NewRef(x); + } else { + v = PyNumber_Long(x); + if (unlikely(!v)) return (int) -1; + assert(PyLong_CheckExact(v)); + } + { + int result = PyObject_RichCompareBool(v, Py_False, Py_LT); + if (unlikely(result < 0)) { + Py_DECREF(v); + return (int) -1; + } + is_negative = result == 1; + } + if (is_unsigned && unlikely(is_negative)) { + Py_DECREF(v); + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + Py_DECREF(v); + if (unlikely(!stepval)) + return (int) -1; + } else { + stepval = v; + } + v = NULL; + val = (int) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + long idigit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + val |= ((int) idigit) << bits; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + } + Py_DECREF(shift); shift = NULL; + Py_DECREF(mask); mask = NULL; + { + long idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((int) idigit) << bits; + } + if (!is_unsigned) { + if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + if (unlikely(ret)) + return (int) -1; + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* PyObjectVectorCallKwBuilder (used by CIntToPy) */ +#if CYTHON_VECTORCALL +static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) { + (void)__Pyx_PyObject_FastCallDict; + if (__Pyx_PyTuple_SET_ITEM(builder, n, key) != (0)) return -1; + Py_INCREF(key); + args[n] = value; + return 0; +} +CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) { + (void)__Pyx_VectorcallBuilder_AddArgStr; + if (unlikely(!PyUnicode_Check(key))) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + return -1; + } + return __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n); +} +static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n) { + PyObject *pyKey = PyUnicode_FromString(key); + if (!pyKey) return -1; + return __Pyx_VectorcallBuilder_AddArg(pyKey, value, builder, args, n); +} +#else // CYTHON_VECTORCALL +CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, CYTHON_UNUSED PyObject **args, CYTHON_UNUSED int n) { + if (unlikely(!PyUnicode_Check(key))) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + return -1; + } + return PyDict_SetItem(builder, key, value); +} +#endif + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value) { +#ifdef 
__Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyLong_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#if !CYTHON_COMPILING_IN_PYPY + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyLong_FromLong((long) value); + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); + } + } + { + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4 + if (is_unsigned) { + return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1); + } else { + return PyLong_FromNativeBytes(bytes, sizeof(value), -1); + } +#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000 + int one = 1; int little = (int)*(unsigned char *)&one; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); +#else + int one = 1; int little = (int)*(unsigned char *)&one; + PyObject *from_bytes, *result = NULL, *kwds = NULL; + PyObject *py_bytes = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(long)); + if (!py_bytes) goto limited_bad; + order_str = PyUnicode_FromString(little ? "little" : "big"); + if (!order_str) goto limited_bad; + { + PyObject *args[3+(CYTHON_VECTORCALL ? 
1 : 0)] = { NULL, py_bytes, order_str }; + if (!is_unsigned) { + kwds = __Pyx_MakeVectorcallBuilderKwds(1); + if (!kwds) goto limited_bad; + if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad; + } + result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds); + } + limited_bad: + Py_XDECREF(kwds); + Py_XDECREF(order_str); + Py_XDECREF(py_bytes); + Py_XDECREF(from_bytes); + return result; +#endif + } +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyLong_From_int(int value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyLong_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#if !CYTHON_COMPILING_IN_PYPY + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyLong_FromLong((long) value); + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); + } + } + { + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4 + if (is_unsigned) { + return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1); + } else { + return PyLong_FromNativeBytes(bytes, sizeof(value), -1); + } +#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000 + int one = 1; int little = (int)*(unsigned char *)&one; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); +#else + int one = 1; int little = (int)*(unsigned char *)&one; + PyObject *from_bytes, *result = NULL, *kwds = NULL; + PyObject *py_bytes = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(int)); + if (!py_bytes) goto limited_bad; + order_str = PyUnicode_FromString(little ? "little" : "big"); + if (!order_str) goto limited_bad; + { + PyObject *args[3+(CYTHON_VECTORCALL ? 
1 : 0)] = { NULL, py_bytes, order_str }; + if (!is_unsigned) { + kwds = __Pyx_MakeVectorcallBuilderKwds(1); + if (!kwds) goto limited_bad; + if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad; + } + result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds); + } + limited_bad: + Py_XDECREF(kwds); + Py_XDECREF(order_str); + Py_XDECREF(py_bytes); + Py_XDECREF(from_bytes); + return result; +#endif + } +} + +/* FormatTypeName */ +#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030d0000 +static __Pyx_TypeName +__Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp) +{ + PyObject *module = NULL, *name = NULL, *result = NULL; + #if __PYX_LIMITED_VERSION_HEX < 0x030b0000 + name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, + __pyx_mstate_global->__pyx_n_u_qualname); + #else + name = PyType_GetQualName(tp); + #endif + if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) goto bad; + module = __Pyx_PyObject_GetAttrStr((PyObject *)tp, + __pyx_mstate_global->__pyx_n_u_module); + if (unlikely(module == NULL) || unlikely(!PyUnicode_Check(module))) goto bad; + if (PyUnicode_CompareWithASCIIString(module, "builtins") == 0) { + result = name; + name = NULL; + goto done; + } + result = PyUnicode_FromFormat("%U.%U", module, name); + if (unlikely(result == NULL)) goto bad; + done: + Py_XDECREF(name); + Py_XDECREF(module); + return result; + bad: + PyErr_Clear(); + if (name) { + result = name; + name = NULL; + } else { + result = __Pyx_NewRef(__pyx_mstate_global->__pyx_kp_u__2); + } + goto done; +} +#endif + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (unlikely(!PyLong_Check(x))) { + long val; + PyObject *tmp = __Pyx_PyNumber_Long(x); + if (!tmp) return (long) -1; + val = __Pyx_PyLong_As_long(tmp); + Py_DECREF(tmp); + return val; + } + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned 
long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(long) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) + } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { + return 
(long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(long) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) + } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) + } + } + { + long val; + int ret = -1; +#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API + Py_ssize_t bytes_copied = PyLong_AsNativeBytes( + x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0)); + if (unlikely(bytes_copied == -1)) { + } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) { + goto raise_overflow; + } else { + ret = 0; + } +#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)x, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *v; + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + int chunk_size = (sizeof(long) < 8) ? 30 : 62; + if (likely(PyLong_CheckExact(x))) { + v = __Pyx_NewRef(x); + } else { + v = PyNumber_Long(x); + if (unlikely(!v)) return (long) -1; + assert(PyLong_CheckExact(v)); + } + { + int result = PyObject_RichCompareBool(v, Py_False, Py_LT); + if (unlikely(result < 0)) { + Py_DECREF(v); + return (long) -1; + } + is_negative = result == 1; + } + if (is_unsigned && unlikely(is_negative)) { + Py_DECREF(v); + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + Py_DECREF(v); + if (unlikely(!stepval)) + return (long) -1; + } else { + stepval = v; + } + v = NULL; + val = (long) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + long idigit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + val |= ((long) idigit) << bits; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + } + Py_DECREF(shift); shift = NULL; + Py_DECREF(mask); mask = NULL; + { + long idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 
0 : 1);
+            if (unlikely(idigit >= (1L << remaining_bits)))
+                goto raise_overflow;
+            val |= ((long) idigit) << bits;
+        }
+        if (!is_unsigned) {
+            if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1))))
+                goto raise_overflow;
+            if (is_negative)
+                val = ~val;
+        }
+        ret = 0;
+    done:
+        Py_XDECREF(shift);
+        Py_XDECREF(mask);
+        Py_XDECREF(stepval);
+#endif
+        if (unlikely(ret))
+            return (long) -1;
+        return val;
+    }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to long");
+    return (long) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to long");
+    return (long) -1;
+}
+
+/* FastTypeChecks */
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+    while (a) {
+        a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*);
+        if (a == b)
+            return 1;
+    }
+    return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+    PyObject *mro;
+    if (a == b) return 1;
+    mro = a->tp_mro;
+    if (likely(mro)) {
+        Py_ssize_t i, n;
+        n = PyTuple_GET_SIZE(mro);
+        for (i = 0; i < n; i++) {
+            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+                return 1;
+        }
+        return 0;
+    }
+    return __Pyx_InBases(a, b);
+}
+static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) {
+    PyObject *mro;
+    if (cls == a || cls == b) return 1;
+    mro = cls->tp_mro;
+    if (likely(mro)) {
+        Py_ssize_t i, n;
+        n = PyTuple_GET_SIZE(mro);
+        for (i = 0; i < n; i++) {
+            PyObject *base = PyTuple_GET_ITEM(mro, i);
+            if (base == (PyObject *)a || base == (PyObject *)b)
+                return 1;
+        }
+        return 0;
+    }
+    return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b);
+}
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+    if (exc_type1) {
+        return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2);
+    } else {
+        return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+}
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        }
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#else
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* PyVersionSetup */
+#if __PYX_LIMITED_VERSION_HEX < 0x030b0000
+static unsigned long __Pyx_cached_runtime_version = 0;
+static void __Pyx_init_runtime_version(void) {
+    if (__Pyx_cached_runtime_version == 0) {
+        const char* rt_version = Py_GetVersion();
+        unsigned long version = 0;
+        unsigned long factor = 0x01000000UL;
+        unsigned int digit = 0;
+        int i = 0;
+        while (factor) {
+            while ('0' <= rt_version[i] && rt_version[i] <= '9') {
+                digit = digit * 10 + (unsigned int) (rt_version[i] - '0');
+                ++i;
+            }
+            version += factor * digit;
+            if (rt_version[i] != '.')
+                break;
+            digit = 0;
+            factor >>= 8;
+            ++i;
+        }
+        __Pyx_cached_runtime_version = version;
+    }
+}
+#endif
+static unsigned long __Pyx_get_runtime_version(void) {
+#if __PYX_LIMITED_VERSION_HEX >= 0x030b0000
+    return Py_Version & ~0xFFUL;
+#else
+    return __Pyx_cached_runtime_version;
+#endif
+}
+
+/* SwapException (used by CoroutineBase) */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4
+    _PyErr_StackItem *exc_info = tstate->exc_info;
+    tmp_value = exc_info->exc_value;
+    exc_info->exc_value = *value;
+    if (tmp_value == NULL || tmp_value == Py_None) {
+        Py_XDECREF(tmp_value);
+        tmp_value = NULL;
+        tmp_type = NULL;
+        tmp_tb = NULL;
+    } else {
+        tmp_type = (PyObject*) Py_TYPE(tmp_value);
+        Py_INCREF(tmp_type);
+        #if CYTHON_COMPILING_IN_CPYTHON
+        tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback;
+        Py_XINCREF(tmp_tb);
+        #else
+        tmp_tb = PyException_GetTraceback(tmp_value);
+        #endif
+    }
+    #elif CYTHON_USE_EXC_INFO_STACK
+    _PyErr_StackItem *exc_info = tstate->exc_info;
+    tmp_type = exc_info->exc_type;
+    tmp_value = exc_info->exc_value;
+    tmp_tb = exc_info->exc_traceback;
+    exc_info->exc_type = 
*type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* PyObjectCall2Args (used by PyObjectCallMethod1) */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args[3] = {NULL, arg1, arg2}; + return __Pyx_PyObject_FastCall(function, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* PyObjectCallMethod1 (used by CoroutineBase) */ +#if !(CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000))) +static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { + PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); + Py_DECREF(method); + return result; +} +#endif +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { +#if CYTHON_VECTORCALL && (__PYX_LIMITED_VERSION_HEX >= 0x030C0000 || (!CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x03090000)) + PyObject *args[2] = {obj, arg}; + (void) __Pyx_PyObject_CallOneArg; + (void) __Pyx_PyObject_Call2Args; + return PyObject_VectorcallMethod(method_name, args, 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL); +#else + PyObject *method = NULL, *result; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_Call2Args(method, obj, arg); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) return NULL; + return __Pyx__PyObject_CallMethod1(method, arg); +#endif +} + +/* ReturnWithStopIteration (used by CoroutineBase) */ +static void __Pyx__ReturnWithStopIteration(PyObject* value, int async); +static CYTHON_INLINE void __Pyx_ReturnWithStopIteration(PyObject* value, int async, int iternext) { + if (value == Py_None) { + if (async || !iternext) + PyErr_SetNone(async ? PyExc_StopAsyncIteration : PyExc_StopIteration); + return; + } + __Pyx__ReturnWithStopIteration(value, async); +} +static void __Pyx__ReturnWithStopIteration(PyObject* value, int async) { +#if CYTHON_COMPILING_IN_CPYTHON + __Pyx_PyThreadState_declare +#endif + PyObject *exc; + PyObject *exc_type = async ? 
PyExc_StopAsyncIteration : PyExc_StopIteration;
+#if CYTHON_COMPILING_IN_CPYTHON
+    if ((PY_VERSION_HEX >= (0x030C00A6)) || unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) {
+        if (PY_VERSION_HEX >= (0x030e00A1)) {
+            exc = __Pyx_PyObject_CallOneArg(exc_type, value);
+        } else {
+            PyObject *args_tuple = PyTuple_New(1);
+            if (unlikely(!args_tuple)) return;
+            Py_INCREF(value);
+            PyTuple_SET_ITEM(args_tuple, 0, value);
+            exc = PyObject_Call(exc_type, args_tuple, NULL);
+            Py_DECREF(args_tuple);
+        }
+        if (unlikely(!exc)) return;
+    } else {
+        Py_INCREF(value);
+        exc = value;
+    }
+    #if CYTHON_FAST_THREAD_STATE
+    __Pyx_PyThreadState_assign
+    #if CYTHON_USE_EXC_INFO_STACK
+    if (!__pyx_tstate->exc_info->exc_value)
+    #else
+    if (!__pyx_tstate->exc_type)
+    #endif
+    {
+        Py_INCREF(exc_type);
+        __Pyx_ErrRestore(exc_type, exc, NULL);
+        return;
+    }
+    #endif
+#else
+    exc = __Pyx_PyObject_CallOneArg(exc_type, value);
+    if (unlikely(!exc)) return;
+#endif
+    PyErr_SetObject(exc_type, exc);
+    Py_DECREF(exc);
+}
+
+/* CoroutineBase (used by Generator) */
+#if !CYTHON_COMPILING_IN_LIMITED_API
+#include <frameobject.h>
+#if PY_VERSION_HEX >= 0x030b00a6 && !defined(PYPY_VERSION)
+  #ifndef Py_BUILD_CORE
+    #define Py_BUILD_CORE 1
+  #endif
+  #include "internal/pycore_frame.h"
+#endif
+#endif // CYTHON_COMPILING_IN_LIMITED_API
+static CYTHON_INLINE void
+__Pyx_Coroutine_Undelegate(__pyx_CoroutineObject *gen) {
+#if CYTHON_USE_AM_SEND
+    gen->yieldfrom_am_send = NULL;
+#endif
+    Py_CLEAR(gen->yieldfrom);
+}
+static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *__pyx_tstate, PyObject **pvalue) {
+    PyObject *et, *ev, *tb;
+    PyObject *value = NULL;
+    CYTHON_UNUSED_VAR(__pyx_tstate);
+    __Pyx_ErrFetch(&et, &ev, &tb);
+    if (!et) {
+        Py_XDECREF(tb);
+        Py_XDECREF(ev);
+        Py_INCREF(Py_None);
+        *pvalue = Py_None;
+        return 0;
+    }
+    if (likely(et == PyExc_StopIteration)) {
+        if (!ev) {
+            Py_INCREF(Py_None);
+            value = Py_None;
+        }
+        else if (likely(__Pyx_IS_TYPE(ev, (PyTypeObject*)PyExc_StopIteration))) {
+            #if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL
+            value = PyObject_GetAttr(ev, __pyx_mstate_global->__pyx_n_u_value);
+            if (unlikely(!value)) goto limited_api_failure;
+            #else
+            value = ((PyStopIterationObject *)ev)->value;
+            Py_INCREF(value);
+            #endif
+            Py_DECREF(ev);
+        }
+        else if (unlikely(PyTuple_Check(ev))) {
+            Py_ssize_t tuple_size = __Pyx_PyTuple_GET_SIZE(ev);
+            #if !CYTHON_ASSUME_SAFE_SIZE
+            if (unlikely(tuple_size < 0)) {
+                Py_XDECREF(tb);
+                Py_DECREF(ev);
+                Py_DECREF(et);
+                return -1;
+            }
+            #endif
+            if (tuple_size >= 1) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+                value = PyTuple_GET_ITEM(ev, 0);
+                Py_INCREF(value);
+#elif CYTHON_ASSUME_SAFE_MACROS
+                value = PySequence_ITEM(ev, 0);
+#else
+                value = PySequence_GetItem(ev, 0);
+                if (!value) goto limited_api_failure;
+#endif
+            } else {
+                Py_INCREF(Py_None);
+                value = Py_None;
+            }
+            Py_DECREF(ev);
+        }
+        else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) {
+            value = ev;
+        }
+        if (likely(value)) {
+            Py_XDECREF(tb);
+            Py_DECREF(et);
+            *pvalue = value;
+            return 0;
+        }
+    } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) {
+        __Pyx_ErrRestore(et, ev, tb);
+        return -1;
+    }
+    PyErr_NormalizeException(&et, &ev, &tb);
+    if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) {
+        __Pyx_ErrRestore(et, ev, tb);
+        return -1;
+    }
+    Py_XDECREF(tb);
+    Py_DECREF(et);
+#if CYTHON_COMPILING_IN_LIMITED_API
+    value = PyObject_GetAttr(ev, __pyx_mstate_global->__pyx_n_u_value);
+#else
+    value = 
((PyStopIterationObject *)ev)->value; + Py_INCREF(value); +#endif + Py_DECREF(ev); +#if CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(!value)) return -1; +#endif + *pvalue = value; + return 0; +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_GRAAL || !CYTHON_ASSUME_SAFE_MACROS + limited_api_failure: + Py_XDECREF(et); + Py_XDECREF(tb); + Py_XDECREF(ev); + return -1; +#endif +} +static CYTHON_INLINE +__Pyx_PySendResult __Pyx_Coroutine_status_from_result(PyObject **retval) { + if (*retval) { + return PYGEN_NEXT; + } else if (likely(__Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, retval) == 0)) { + return PYGEN_RETURN; + } else { + return PYGEN_ERROR; + } +} +static CYTHON_INLINE +void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { +#if PY_VERSION_HEX >= 0x030B00a4 + Py_CLEAR(exc_state->exc_value); +#else + PyObject *t, *v, *tb; + t = exc_state->exc_type; + v = exc_state->exc_value; + tb = exc_state->exc_traceback; + exc_state->exc_type = NULL; + exc_state->exc_value = NULL; + exc_state->exc_traceback = NULL; + Py_XDECREF(t); + Py_XDECREF(v); + Py_XDECREF(tb); +#endif +} +#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) +static void __Pyx__Coroutine_AlreadyRunningError(__pyx_CoroutineObject *gen) { + const char *msg; + CYTHON_MAYBE_UNUSED_VAR(gen); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { + msg = "coroutine already executing"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { + msg = "async generator already executing"; + #endif + } else { + msg = "generator already executing"; + } + PyErr_SetString(PyExc_ValueError, msg); +} +static void __Pyx_Coroutine_AlreadyTerminatedError(PyObject *gen, PyObject *value, int closing) { + CYTHON_MAYBE_UNUSED_VAR(gen); + CYTHON_MAYBE_UNUSED_VAR(closing); + #ifdef __Pyx_Coroutine_USED + if (!closing && __Pyx_Coroutine_Check(gen)) { + PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); + } else + #endif + if (value) { + #ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(gen)) + PyErr_SetNone(PyExc_StopAsyncIteration); + else + #endif + PyErr_SetNone(PyExc_StopIteration); + } +} +static +__Pyx_PySendResult __Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, PyObject **result, int closing) { + __Pyx_PyThreadState_declare + PyThreadState *tstate; + __Pyx_ExcInfoStruct *exc_state; + PyObject *retval; + assert(__Pyx_Coroutine_get_is_running(self)); // Callers should ensure is_running + if (unlikely(self->resume_label == -1)) { + __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); + return PYGEN_ERROR; + } +#if CYTHON_FAST_THREAD_STATE + __Pyx_PyThreadState_assign + tstate = __pyx_tstate; +#else + tstate = __Pyx_PyThreadState_Current; +#endif + exc_state = &self->gi_exc_state; + if (exc_state->exc_value) { + #if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + #else + PyObject *exc_tb; + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + exc_tb = PyException_GetTraceback(exc_state->exc_value); + #elif PY_VERSION_HEX >= 0x030B00a4 + exc_tb = ((PyBaseExceptionObject*) exc_state->exc_value)->traceback; + #else + exc_tb = exc_state->exc_traceback; + #endif + if (exc_tb) { + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; + PyFrameObject *f = tb->tb_frame; + assert(f->f_back == NULL); + #if PY_VERSION_HEX >= 0x030B00A1 + f->f_back = 
PyThreadState_GetFrame(tstate); + #else + Py_XINCREF(tstate->frame); + f->f_back = tstate->frame; + #endif + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + Py_DECREF(exc_tb); + #endif + } + #endif + } +#if CYTHON_USE_EXC_INFO_STACK + exc_state->previous_item = tstate->exc_info; + tstate->exc_info = exc_state; +#else + if (exc_state->exc_type) { + __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + } else { + __Pyx_Coroutine_ExceptionClear(exc_state); + __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + } +#endif + retval = self->body(self, tstate, value); +#if CYTHON_USE_EXC_INFO_STACK + exc_state = &self->gi_exc_state; + tstate->exc_info = exc_state->previous_item; + exc_state->previous_item = NULL; + __Pyx_Coroutine_ResetFrameBackpointer(exc_state); +#endif + *result = retval; + if (self->resume_label == -1) { + return likely(retval) ? PYGEN_RETURN : PYGEN_ERROR; + } + return PYGEN_NEXT; +} +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + CYTHON_UNUSED_VAR(exc_state); +#else + PyObject *exc_tb; + #if PY_VERSION_HEX >= 0x030B00a4 + if (!exc_state->exc_value) return; + exc_tb = PyException_GetTraceback(exc_state->exc_value); + #else + exc_tb = exc_state->exc_traceback; + #endif + if (likely(exc_tb)) { + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; + PyFrameObject *f = tb->tb_frame; + Py_CLEAR(f->f_back); + #if PY_VERSION_HEX >= 0x030B00a4 + Py_DECREF(exc_tb); + #endif + } +#endif +} +#define __Pyx_Coroutine_MethodReturnFromResult(gen, result, retval, iternext)\ + ((result) == PYGEN_NEXT ? (retval) : __Pyx__Coroutine_MethodReturnFromResult(gen, result, retval, iternext)) +static PyObject * +__Pyx__Coroutine_MethodReturnFromResult(PyObject* gen, __Pyx_PySendResult result, PyObject *retval, int iternext) { + CYTHON_MAYBE_UNUSED_VAR(gen); + if (likely(result == PYGEN_RETURN)) { + int is_async = 0; + #ifdef __Pyx_AsyncGen_USED + is_async = __Pyx_AsyncGen_CheckExact(gen); + #endif + __Pyx_ReturnWithStopIteration(retval, is_async, iternext); + Py_XDECREF(retval); + } + return NULL; +} +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE +PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { +#if PY_VERSION_HEX <= 0x030A00A1 + return _PyGen_Send(gen, arg); +#else + PyObject *result; + if (PyIter_Send((PyObject*)gen, arg ? 
arg : Py_None, &result) == PYGEN_RETURN) { + if (PyAsyncGen_CheckExact(gen)) { + assert(result == Py_None); + PyErr_SetNone(PyExc_StopAsyncIteration); + } + else if (result == Py_None) { + PyErr_SetNone(PyExc_StopIteration); + } + else { +#if PY_VERSION_HEX < 0x030d00A1 + _PyGen_SetStopIterationValue(result); +#else + if (!PyTuple_Check(result) && !PyExceptionInstance_Check(result)) { + PyErr_SetObject(PyExc_StopIteration, result); + } else { + PyObject *exc = __Pyx_PyObject_CallOneArg(PyExc_StopIteration, result); + if (likely(exc != NULL)) { + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); + } + } +#endif + } + Py_DECREF(result); + result = NULL; + } + return result; +#endif +} +#endif +static CYTHON_INLINE __Pyx_PySendResult +__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen, PyObject** retval) { + __Pyx_PySendResult result; + PyObject *val = NULL; + assert(__Pyx_Coroutine_get_is_running(gen)); + __Pyx_Coroutine_Undelegate(gen); + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); + result = __Pyx_Coroutine_SendEx(gen, val, retval, 0); + Py_XDECREF(val); + return result; +} +#if CYTHON_USE_AM_SEND +static __Pyx_PySendResult +__Pyx_Coroutine_SendToDelegate(__pyx_CoroutineObject *gen, __Pyx_pyiter_sendfunc gen_am_send, PyObject *value, PyObject **retval) { + PyObject *ret = NULL; + __Pyx_PySendResult delegate_result, result; + assert(__Pyx_Coroutine_get_is_running(gen)); + delegate_result = gen_am_send(gen->yieldfrom, value, &ret); + if (delegate_result == PYGEN_NEXT) { + assert (ret != NULL); + *retval = ret; + return PYGEN_NEXT; + } + assert (delegate_result != PYGEN_ERROR || ret == NULL); + __Pyx_Coroutine_Undelegate(gen); + result = __Pyx_Coroutine_SendEx(gen, ret, retval, 0); + Py_XDECREF(ret); + return result; +} +#endif +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { + PyObject *retval = NULL; + __Pyx_PySendResult result = __Pyx_Coroutine_AmSend(self, value, &retval); + return __Pyx_Coroutine_MethodReturnFromResult(self, result, retval, 0); +} +static __Pyx_PySendResult +__Pyx_Coroutine_AmSend(PyObject *self, PyObject *value, PyObject **retval) { + __Pyx_PySendResult result; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + if (unlikely(__Pyx_Coroutine_test_and_set_is_running(gen))) { + *retval = __Pyx_Coroutine_AlreadyRunningError(gen); + return PYGEN_ERROR; + } + #if CYTHON_USE_AM_SEND + if (gen->yieldfrom_am_send) { + result = __Pyx_Coroutine_SendToDelegate(gen, gen->yieldfrom_am_send, value, retval); + } else + #endif + if (gen->yieldfrom) { + PyObject *yf = gen->yieldfrom; + PyObject *ret; + #if !CYTHON_USE_AM_SEND + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_PyAsyncGenASend_CheckExact(yf)) { + ret = __Pyx_async_gen_asend_send(yf, value); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON + if (PyGen_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); + } else + if (PyCoro_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? 
NULL : value); + } else + #endif + #endif + { + #if !CYTHON_COMPILING_IN_LIMITED_API || __PYX_LIMITED_VERSION_HEX >= 0x03080000 + if (value == Py_None && PyIter_Check(yf)) + ret = __Pyx_PyIter_Next_Plain(yf); + else + #endif + ret = __Pyx_PyObject_CallMethod1(yf, __pyx_mstate_global->__pyx_n_u_send, value); + } + if (likely(ret)) { + __Pyx_Coroutine_unset_is_running(gen); + *retval = ret; + return PYGEN_NEXT; + } + result = __Pyx_Coroutine_FinishDelegation(gen, retval); + } else { + result = __Pyx_Coroutine_SendEx(gen, value, retval, 0); + } + __Pyx_Coroutine_unset_is_running(gen); + return result; +} +static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { + __Pyx_PySendResult result; + PyObject *retval = NULL; + CYTHON_UNUSED_VAR(gen); + assert(__Pyx_Coroutine_get_is_running(gen)); + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + result = __Pyx_Coroutine_Close(yf, &retval); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + result = __Pyx_Coroutine_Close(yf, &retval); + } else + if (__Pyx_CoroutineAwait_CheckExact(yf)) { + result = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf); + } else + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_PyAsyncGenASend_CheckExact(yf)) { + retval = __Pyx_async_gen_asend_close(yf, NULL); + result = PYGEN_RETURN; + } else + if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) { + retval = __Pyx_async_gen_athrow_close(yf, NULL); + result = PYGEN_RETURN; + } else + #endif + { + PyObject *meth; + result = PYGEN_RETURN; + meth = __Pyx_PyObject_GetAttrStrNoError(yf, __pyx_mstate_global->__pyx_n_u_close); + if (unlikely(!meth)) { + if (unlikely(PyErr_Occurred())) { + PyErr_WriteUnraisable(yf); + } + } else { + retval = __Pyx_PyObject_CallNoArg(meth); + Py_DECREF(meth); + if (unlikely(!retval)) { + result = PYGEN_ERROR; + } + } + } + Py_XDECREF(retval); + return result == PYGEN_ERROR ? 
-1 : 0; +} +static PyObject *__Pyx_Generator_Next(PyObject *self) { + __Pyx_PySendResult result; + PyObject *retval = NULL; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + if (unlikely(__Pyx_Coroutine_test_and_set_is_running(gen))) { + return __Pyx_Coroutine_AlreadyRunningError(gen); + } + #if CYTHON_USE_AM_SEND + if (gen->yieldfrom_am_send) { + result = __Pyx_Coroutine_SendToDelegate(gen, gen->yieldfrom_am_send, Py_None, &retval); + } else + #endif + if (gen->yieldfrom) { + PyObject *yf = gen->yieldfrom; + PyObject *ret; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Generator_Next(yf); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_CheckExact(yf)) { + ret = __Pyx_Coroutine_Send(yf, Py_None); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && (PY_VERSION_HEX < 0x030A00A3 || !CYTHON_USE_AM_SEND) + if (PyGen_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL); + } else + #endif + ret = __Pyx_PyIter_Next_Plain(yf); + if (likely(ret)) { + __Pyx_Coroutine_unset_is_running(gen); + return ret; + } + result = __Pyx_Coroutine_FinishDelegation(gen, &retval); + } else { + result = __Pyx_Coroutine_SendEx(gen, Py_None, &retval, 0); + } + __Pyx_Coroutine_unset_is_running(gen); + return __Pyx_Coroutine_MethodReturnFromResult(self, result, retval, 1); +} +static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, PyObject *arg) { + PyObject *retval = NULL; + __Pyx_PySendResult result; + CYTHON_UNUSED_VAR(arg); + result = __Pyx_Coroutine_Close(self, &retval); + if (unlikely(result == PYGEN_ERROR)) + return NULL; + Py_XDECREF(retval); + Py_RETURN_NONE; +} +static __Pyx_PySendResult +__Pyx_Coroutine_Close(PyObject *self, PyObject **retval) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + __Pyx_PySendResult result; + PyObject *yf; + int err = 0; + if (unlikely(__Pyx_Coroutine_test_and_set_is_running(gen))) { + *retval = __Pyx_Coroutine_AlreadyRunningError(gen); + return PYGEN_ERROR; + } + yf = gen->yieldfrom; + if (yf) { + Py_INCREF(yf); + err = __Pyx_Coroutine_CloseIter(gen, yf); + __Pyx_Coroutine_Undelegate(gen); + Py_DECREF(yf); + } + if (err == 0) + PyErr_SetNone(PyExc_GeneratorExit); + result = __Pyx_Coroutine_SendEx(gen, NULL, retval, 1); + if (result == PYGEN_ERROR) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_Coroutine_unset_is_running(gen); + if (!__Pyx_PyErr_Occurred()) { + return PYGEN_RETURN; + } else if (likely(__Pyx_PyErr_ExceptionMatches2(PyExc_GeneratorExit, PyExc_StopIteration))) { + __Pyx_PyErr_Clear(); + return PYGEN_RETURN; + } + return PYGEN_ERROR; + } else if (likely(result == PYGEN_RETURN && *retval == Py_None)) { + __Pyx_Coroutine_unset_is_running(gen); + return PYGEN_RETURN; + } else { + const char *msg; + Py_DECREF(*retval); + *retval = NULL; + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check(self)) { + msg = "coroutine ignored GeneratorExit"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact(self)) { + msg = "async generator ignored GeneratorExit"; + #endif + } else { + msg = "generator ignored GeneratorExit"; + } + PyErr_SetString(PyExc_RuntimeError, msg); + __Pyx_Coroutine_unset_is_running(gen); + return PYGEN_ERROR; + } +} +static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, + PyObject *args, int close_on_genexit) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *yf; + if 
(unlikely(__Pyx_Coroutine_test_and_set_is_running(gen))) + return __Pyx_Coroutine_AlreadyRunningError(gen); + yf = gen->yieldfrom; + if (yf) { + __Pyx_PySendResult result; + PyObject *ret; + Py_INCREF(yf); + if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { + int err = __Pyx_Coroutine_CloseIter(gen, yf); + Py_DECREF(yf); + __Pyx_Coroutine_Undelegate(gen); + if (err < 0) + goto propagate_exception; + goto throw_here; + } + if (0 + #ifdef __Pyx_Generator_USED + || __Pyx_Generator_CheckExact(yf) + #endif + #ifdef __Pyx_Coroutine_USED + || __Pyx_Coroutine_Check(yf) + #endif + ) { + ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { + ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); + #endif + } else { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(yf, __pyx_mstate_global->__pyx_n_u_throw); + if (unlikely(!meth)) { + Py_DECREF(yf); + if (unlikely(PyErr_Occurred())) { + __Pyx_Coroutine_unset_is_running(gen); + return NULL; + } + __Pyx_Coroutine_Undelegate(gen); + goto throw_here; + } + if (likely(args)) { + ret = __Pyx_PyObject_Call(meth, args, NULL); + } else { + PyObject *cargs[4] = {NULL, typ, val, tb}; + ret = __Pyx_PyObject_FastCall(meth, cargs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); + } + Py_DECREF(meth); + } + Py_DECREF(yf); + if (ret) { + __Pyx_Coroutine_unset_is_running(gen); + return ret; + } + result = __Pyx_Coroutine_FinishDelegation(gen, &ret); + __Pyx_Coroutine_unset_is_running(gen); + return __Pyx_Coroutine_MethodReturnFromResult(self, result, ret, 0); + } +throw_here: + __Pyx_Raise(typ, val, tb, NULL); +propagate_exception: + { + PyObject *retval = NULL; + __Pyx_PySendResult result = __Pyx_Coroutine_SendEx(gen, NULL, &retval, 0); + __Pyx_Coroutine_unset_is_running(gen); + return __Pyx_Coroutine_MethodReturnFromResult(self, result, retval, 0); + } +} +static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { + PyObject *typ; + PyObject *val = NULL; + PyObject *tb = NULL; + if (unlikely(!PyArg_UnpackTuple(args, "throw", 1, 3, &typ, &val, &tb))) + return NULL; + return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); +} +static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { +#if PY_VERSION_HEX >= 0x030B00a4 + Py_VISIT(exc_state->exc_value); +#else + Py_VISIT(exc_state->exc_type); + Py_VISIT(exc_state->exc_value); + Py_VISIT(exc_state->exc_traceback); +#endif + return 0; +} +static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { + { + int e = __Pyx_call_type_traverse((PyObject*)gen, 1, visit, arg); + if (e) return e; + } + Py_VISIT(gen->closure); + Py_VISIT(gen->classobj); + Py_VISIT(gen->yieldfrom); + return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); +} +static int __Pyx_Coroutine_clear(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + Py_CLEAR(gen->closure); + Py_CLEAR(gen->classobj); + __Pyx_Coroutine_Undelegate(gen); + __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); + } +#endif + Py_CLEAR(gen->gi_code); + Py_CLEAR(gen->gi_frame); + Py_CLEAR(gen->gi_name); + Py_CLEAR(gen->gi_qualname); + Py_CLEAR(gen->gi_modulename); + return 0; +} +static void 
__Pyx_Coroutine_dealloc(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject_GC_UnTrack(gen); + #if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + if (gen->gi_weakreflist != NULL) + #endif + PyObject_ClearWeakRefs(self); + if (gen->resume_label >= 0) { + PyObject_GC_Track(self); +#if CYTHON_USE_TP_FINALIZE + if (unlikely(PyObject_CallFinalizerFromDealloc(self))) +#else + { + destructor del = __Pyx_PyObject_GetSlot(gen, tp_del, destructor); + if (del) del(self); + } + if (unlikely(Py_REFCNT(self) > 0)) +#endif + { + return; + } + PyObject_GC_UnTrack(self); + } +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + /* We have to handle this case for asynchronous generators + right here, because this code has to be between UNTRACK + and GC_Del. */ + Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); + } +#endif + __Pyx_Coroutine_clear(self); + __Pyx_PyHeapTypeObject_GC_Del(gen); +} +#if CYTHON_USE_TP_FINALIZE +static void __Pyx_Coroutine_del(PyObject *self) { + PyObject *error_type, *error_value, *error_traceback; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + __Pyx_PyThreadState_declare + if (gen->resume_label < 0) { + return; + } + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; + PyObject *finalizer = agen->ag_finalizer; + if (finalizer && !agen->ag_closed) { + PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); + if (unlikely(!res)) { + PyErr_WriteUnraisable(self); + } else { + Py_DECREF(res); + } + __Pyx_ErrRestore(error_type, error_value, error_traceback); + return; + } + } +#endif + if (unlikely(gen->resume_label == 0 && !error_value)) { +#ifdef __Pyx_Coroutine_USED +#ifdef __Pyx_Generator_USED + if (!__Pyx_Generator_CheckExact(self)) +#endif + { + PyObject_GC_UnTrack(self); + if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) + PyErr_WriteUnraisable(self); + PyObject_GC_Track(self); + } +#endif + } else { + PyObject *retval = NULL; + __Pyx_PySendResult result = __Pyx_Coroutine_Close(self, &retval); + if (result == PYGEN_ERROR) { + PyErr_WriteUnraisable(self); + } else { + Py_XDECREF(retval); + } + } + __Pyx_ErrRestore(error_type, error_value, error_traceback); +} +#endif +static PyObject * +__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, void *context) +{ + PyObject *name = self->gi_name; + CYTHON_UNUSED_VAR(context); + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} +static int +__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL || !PyUnicode_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(self->gi_name, value); + return 0; +} +static PyObject * +__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, void *context) +{ + PyObject *name = self->gi_qualname; + CYTHON_UNUSED_VAR(context); + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} +static int +__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL || !PyUnicode_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must 
be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(self->gi_qualname, value); + return 0; +} +static PyObject * +__Pyx__Coroutine_get_frame(__pyx_CoroutineObject *self) +{ +#if !CYTHON_COMPILING_IN_LIMITED_API + PyObject *frame; + #if PY_VERSION_HEX >= 0x030d0000 + Py_BEGIN_CRITICAL_SECTION(self); + #endif + frame = self->gi_frame; + if (!frame) { + if (unlikely(!self->gi_code)) { + Py_RETURN_NONE; + } + PyObject *globals = PyDict_New(); + if (unlikely(!globals)) return NULL; + frame = (PyObject *) PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + (PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/ + globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + Py_DECREF(globals); + if (unlikely(!frame)) + return NULL; + if (unlikely(self->gi_frame)) { + Py_DECREF(frame); + frame = self->gi_frame; + } else { + self->gi_frame = frame; + } + } + Py_INCREF(frame); + #if PY_VERSION_HEX >= 0x030d0000 + Py_END_CRITICAL_SECTION(); + #endif + return frame; +#else + CYTHON_UNUSED_VAR(self); + Py_RETURN_NONE; +#endif +} +static PyObject * +__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, void *context) { + CYTHON_UNUSED_VAR(context); + PyObject *frame = self->gi_frame; + if (frame) + return __Pyx_NewRef(frame); + return __Pyx__Coroutine_get_frame(self); +} +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); + if (unlikely(!gen)) + return NULL; + return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); +} +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + gen->body = body; + gen->closure = closure; + Py_XINCREF(closure); + gen->is_running = 0; + gen->resume_label = 0; + gen->classobj = NULL; + gen->yieldfrom = NULL; + gen->yieldfrom_am_send = NULL; + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_LIMITED_API + gen->gi_exc_state.exc_value = NULL; + #else + gen->gi_exc_state.exc_type = NULL; + gen->gi_exc_state.exc_value = NULL; + gen->gi_exc_state.exc_traceback = NULL; + #endif +#if CYTHON_USE_EXC_INFO_STACK + gen->gi_exc_state.previous_item = NULL; +#endif +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + gen->gi_weakreflist = NULL; +#endif + Py_XINCREF(qualname); + gen->gi_qualname = qualname; + Py_XINCREF(name); + gen->gi_name = name; + Py_XINCREF(module_name); + gen->gi_modulename = module_name; + Py_XINCREF(code); + gen->gi_code = code; + gen->gi_frame = NULL; + PyObject_GC_Track(gen); + return gen; +} +static char __Pyx_Coroutine_test_and_set_is_running(__pyx_CoroutineObject *gen) { + char result; + #if PY_VERSION_HEX >= 0x030d0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_BEGIN_CRITICAL_SECTION(gen); + #endif + result = gen->is_running; + gen->is_running = 1; + #if PY_VERSION_HEX >= 0x030d0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_END_CRITICAL_SECTION(); + #endif + return result; +} +static void __Pyx_Coroutine_unset_is_running(__pyx_CoroutineObject *gen) { + #if PY_VERSION_HEX >= 0x030d0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_BEGIN_CRITICAL_SECTION(gen); + #endif + assert(gen->is_running); + gen->is_running = 0; + #if PY_VERSION_HEX >= 0x030d0000 && 
!CYTHON_COMPILING_IN_LIMITED_API + Py_END_CRITICAL_SECTION(); + #endif +} +static char __Pyx_Coroutine_get_is_running(__pyx_CoroutineObject *gen) { + char result; + #if PY_VERSION_HEX >= 0x030d0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_BEGIN_CRITICAL_SECTION(gen); + #endif + result = gen->is_running; + #if PY_VERSION_HEX >= 0x030d0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_END_CRITICAL_SECTION(); + #endif + return result; +} +static PyObject *__Pyx_Coroutine_get_is_running_getter(PyObject *gen, void *closure) { + CYTHON_UNUSED_VAR(closure); + char result = __Pyx_Coroutine_get_is_running((__pyx_CoroutineObject*)gen); + if (result) Py_RETURN_TRUE; + else Py_RETURN_FALSE; +} +#if __PYX_HAS_PY_AM_SEND == 2 +static void __Pyx_SetBackportTypeAmSend(PyTypeObject *type, __Pyx_PyAsyncMethodsStruct *static_amsend_methods, __Pyx_pyiter_sendfunc am_send) { + Py_ssize_t ptr_offset = (char*)(type->tp_as_async) - (char*)type; + if (ptr_offset < 0 || ptr_offset > type->tp_basicsize) { + return; + } + memcpy((void*)static_amsend_methods, (void*)(type->tp_as_async), sizeof(*type->tp_as_async)); + static_amsend_methods->am_send = am_send; + type->tp_as_async = __Pyx_SlotTpAsAsync(static_amsend_methods); +} +#endif +static PyObject *__Pyx_Coroutine_fail_reduce_ex(PyObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + __Pyx_TypeName self_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE((PyObject*)self)); + PyErr_Format(PyExc_TypeError, "cannot pickle '" __Pyx_FMT_TYPENAME "' object", + self_type_name); + __Pyx_DECREF_TypeName(self_type_name); + return NULL; +} + +/* Generator */ +static PyMethodDef __pyx_Generator_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, + {"__reduce_ex__", (PyCFunction) __Pyx_Coroutine_fail_reduce_ex, METH_O, 0}, + {"__reduce__", (PyCFunction) __Pyx_Coroutine_fail_reduce_ex, METH_NOARGS, 0}, + {0, 0, 0, 0} +}; +static PyMemberDef __pyx_Generator_memberlist[] = { + {"gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + PyDoc_STR("object being iterated by 'yield from', or None")}, + {"gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, + {"__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if PY_VERSION_HEX < 0x030C0000 || CYTHON_COMPILING_IN_LIMITED_API + {"__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} +}; +static PyGetSetDef __pyx_Generator_getsets[] = { + {"__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + PyDoc_STR("name of the generator"), 0}, + {"__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + PyDoc_STR("qualified name of the generator"), 0}, + {"gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + PyDoc_STR("Frame of the generator"), 0}, + {"gi_running", __Pyx_Coroutine_get_is_running_getter, NULL, NULL, NULL}, + {0, 0, 0, 0, 0} +}; +static PyType_Slot __pyx_GeneratorType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + 
{Py_tp_iter, (void *)PyObject_SelfIter}, + {Py_tp_iternext, (void *)__Pyx_Generator_Next}, + {Py_tp_methods, (void *)__pyx_Generator_methods}, + {Py_tp_members, (void *)__pyx_Generator_memberlist}, + {Py_tp_getset, (void *)__pyx_Generator_getsets}, + {Py_tp_getattro, (void *) PyObject_GenericGetAttr}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif +#if __PYX_HAS_PY_AM_SEND == 1 + {Py_am_send, (void *)__Pyx_Coroutine_AmSend}, +#endif + {0, 0}, +}; +static PyType_Spec __pyx_GeneratorType_spec = { + __PYX_TYPE_MODULE_PREFIX "generator", + sizeof(__pyx_CoroutineObject), + 0, +#if PY_VERSION_HEX >= 0x030C0000 && !CYTHON_COMPILING_IN_LIMITED_API + Py_TPFLAGS_MANAGED_WEAKREF | +#endif + Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION | + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | __Pyx_TPFLAGS_HAVE_AM_SEND, + __pyx_GeneratorType_slots +}; +#if __PYX_HAS_PY_AM_SEND == 2 +static __Pyx_PyAsyncMethodsStruct __pyx_Generator_as_async; +#endif +static int __pyx_Generator_init(PyObject *module) { + __pyx_mstatetype *mstate = __Pyx_PyModule_GetState(module); + mstate->__pyx_GeneratorType = __Pyx_FetchCommonTypeFromSpec( + mstate->__pyx_CommonTypesMetaclassType, module, &__pyx_GeneratorType_spec, NULL); + if (unlikely(!mstate->__pyx_GeneratorType)) { + return -1; + } +#if __PYX_HAS_PY_AM_SEND == 2 + __Pyx_SetBackportTypeAmSend(mstate->__pyx_GeneratorType, &__pyx_Generator_as_async, &__Pyx_Coroutine_AmSend); +#endif + return 0; +} +static PyObject *__Pyx_Generator_GetInlinedResult(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *retval = NULL; + if (unlikely(__Pyx_Coroutine_test_and_set_is_running(gen))) { + return __Pyx_Coroutine_AlreadyRunningError(gen); + } + __Pyx_PySendResult result = __Pyx_Coroutine_SendEx(gen, Py_None, &retval, 0); + __Pyx_Coroutine_unset_is_running(gen); + (void) result; + assert (result == PYGEN_RETURN || result == PYGEN_ERROR); + assert ((result == PYGEN_RETURN && retval != NULL) || (result == PYGEN_ERROR && retval == NULL)); + return retval; +} + +/* CheckBinaryVersion */ +static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) { + const unsigned long MAJOR_MINOR = 0xFFFF0000UL; + if ((rt_version & MAJOR_MINOR) == (ct_version & MAJOR_MINOR)) + return 0; + if (likely(allow_newer && (rt_version & MAJOR_MINOR) > (ct_version & MAJOR_MINOR))) + return 1; + { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compile time Python version %d.%d " + "of module '%.100s' " + "%s " + "runtime version %d.%d", + (int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF), + __Pyx_MODULE_NAME, + (allow_newer) ? 
"was newer than" : "does not match", + (int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF) + ); + return PyErr_WarnEx(NULL, message, 1); + } +} + +/* NewCodeObj */ +#if CYTHON_COMPILING_IN_LIMITED_API + static PyObject* __Pyx__PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyObject *exception_table = NULL; + PyObject *types_module=NULL, *code_type=NULL, *result=NULL; + #if __PYX_LIMITED_VERSION_HEX < 0x030b0000 + PyObject *version_info; + PyObject *py_minor_version = NULL; + #endif + long minor_version = 0; + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + #if __PYX_LIMITED_VERSION_HEX >= 0x030b0000 + minor_version = 11; + #else + if (!(version_info = PySys_GetObject("version_info"))) goto end; + if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end; + minor_version = PyLong_AsLong(py_minor_version); + Py_DECREF(py_minor_version); + if (minor_version == -1 && PyErr_Occurred()) goto end; + #endif + if (!(types_module = PyImport_ImportModule("types"))) goto end; + if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end; + if (minor_version <= 7) { + (void)p; + result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOOO", a, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else if (minor_version <= 10) { + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOOO", a,p, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else { + if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end; + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOOOO", a,p, k, l, s, f, code, + c, n, v, fn, name, name, fline, lnos, exception_table, fv, cell); + } + end: + Py_XDECREF(code_type); + Py_XDECREF(exception_table); + Py_XDECREF(types_module); + if (type) { + PyErr_Restore(type, value, traceback); + } + return result; + } +#elif PY_VERSION_HEX >= 0x030B0000 + static PyCodeObject* __Pyx__PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyCodeObject *result; + result = + #if PY_VERSION_HEX >= 0x030C0000 + PyUnstable_Code_NewWithPosOnlyArgs + #else + PyCode_NewWithPosOnlyArgs + #endif + (a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, __pyx_mstate_global->__pyx_empty_bytes); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030c00A1 + if (likely(result)) + result->_co_firsttraceable = 0; + #endif + return result; + } +#elif !CYTHON_COMPILING_IN_PYPY + #define __Pyx__PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx__PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif +static PyObject* __Pyx_PyCode_New( + const __Pyx_PyCode_New_function_description descr, + PyObject * const *varnames, + PyObject *filename, + PyObject *funcname, + PyObject *line_table, + PyObject *tuple_dedup_map +) { + PyObject *code_obj = NULL, *varnames_tuple_dedup = NULL, *code_bytes = NULL; + Py_ssize_t var_count = (Py_ssize_t) descr.nlocals; + PyObject *varnames_tuple = PyTuple_New(var_count); + if 
(unlikely(!varnames_tuple)) return NULL; + for (Py_ssize_t i=0; i < var_count; i++) { + Py_INCREF(varnames[i]); + if (__Pyx_PyTuple_SET_ITEM(varnames_tuple, i, varnames[i]) != (0)) goto done; + } + #if CYTHON_COMPILING_IN_LIMITED_API + varnames_tuple_dedup = PyDict_GetItem(tuple_dedup_map, varnames_tuple); + if (!varnames_tuple_dedup) { + if (unlikely(PyDict_SetItem(tuple_dedup_map, varnames_tuple, varnames_tuple) < 0)) goto done; + varnames_tuple_dedup = varnames_tuple; + } + #else + varnames_tuple_dedup = PyDict_SetDefault(tuple_dedup_map, varnames_tuple, varnames_tuple); + if (unlikely(!varnames_tuple_dedup)) goto done; + #endif + #if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(varnames_tuple_dedup); + #endif + if (__PYX_LIMITED_VERSION_HEX >= (0x030b0000) && line_table != NULL && !CYTHON_COMPILING_IN_GRAAL) { + Py_ssize_t line_table_length = __Pyx_PyBytes_GET_SIZE(line_table); + #if !CYTHON_ASSUME_SAFE_SIZE + if (unlikely(line_table_length == -1)) goto done; + #endif + Py_ssize_t code_len = (line_table_length * 2 + 4) & ~3LL; + code_bytes = PyBytes_FromStringAndSize(NULL, code_len); + if (unlikely(!code_bytes)) goto done; + char* c_code_bytes = PyBytes_AsString(code_bytes); + if (unlikely(!c_code_bytes)) goto done; + memset(c_code_bytes, 0, (size_t) code_len); + } + code_obj = (PyObject*) __Pyx__PyCode_New( + (int) descr.argcount, + (int) descr.num_posonly_args, + (int) descr.num_kwonly_args, + (int) descr.nlocals, + 0, + (int) descr.flags, + code_bytes ? code_bytes : __pyx_mstate_global->__pyx_empty_bytes, + __pyx_mstate_global->__pyx_empty_tuple, + __pyx_mstate_global->__pyx_empty_tuple, + varnames_tuple_dedup, + __pyx_mstate_global->__pyx_empty_tuple, + __pyx_mstate_global->__pyx_empty_tuple, + filename, + funcname, + (int) descr.first_line, + (__PYX_LIMITED_VERSION_HEX >= (0x030b0000) && line_table) ? line_table : __pyx_mstate_global->__pyx_empty_bytes + ); +done: + Py_XDECREF(code_bytes); + #if CYTHON_AVOID_BORROWED_REFS + Py_XDECREF(varnames_tuple_dedup); + #endif + Py_DECREF(varnames_tuple); + return code_obj; +} + +/* DecompressString */ +static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo) { + PyObject *module, *decompress, *compressed_bytes, *decompressed; + const char* module_name = algo == 3 ? "compression.zstd" : algo == 2 ? 
"bz2" : "zlib"; + PyObject *methodname = PyUnicode_FromString("decompress"); + if (unlikely(!methodname)) return NULL; + #if __PYX_LIMITED_VERSION_HEX >= 0x030e0000 + if (algo == 3) { + PyObject *fromlist = Py_BuildValue("[O]", methodname); + if (unlikely(!fromlist)) return NULL; + module = PyImport_ImportModuleLevel("compression.zstd", NULL, NULL, fromlist, 0); + Py_DECREF(fromlist); + } else + #endif + module = PyImport_ImportModule(module_name); + if (unlikely(!module)) goto import_failed; + decompress = PyObject_GetAttr(module, methodname); + if (unlikely(!decompress)) goto import_failed; + { + #ifdef __cplusplus + char *memview_bytes = const_cast(s); + #else + #if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-qual" + #elif !defined(__INTEL_COMPILER) && defined(__GNUC__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wcast-qual" + #endif + char *memview_bytes = (char*) s; + #if defined(__clang__) + #pragma clang diagnostic pop + #elif !defined(__INTEL_COMPILER) && defined(__GNUC__) + #pragma GCC diagnostic pop + #endif + #endif + #if CYTHON_COMPILING_IN_LIMITED_API && !defined(PyBUF_READ) + int memview_flags = 0x100; + #else + int memview_flags = PyBUF_READ; + #endif + compressed_bytes = PyMemoryView_FromMemory(memview_bytes, length, memview_flags); + } + if (unlikely(!compressed_bytes)) { + Py_DECREF(decompress); + goto bad; + } + decompressed = PyObject_CallFunctionObjArgs(decompress, compressed_bytes, NULL); + Py_DECREF(compressed_bytes); + Py_DECREF(decompress); + Py_DECREF(module); + Py_DECREF(methodname); + return decompressed; +import_failed: + PyErr_Format(PyExc_ImportError, + "Failed to import '%.20s.decompress' - cannot initialise module strings. " + "String compression was configured with the C macro 'CYTHON_COMPRESS_STRINGS=%d'.", + module_name, algo); +bad: + Py_XDECREF(module); + Py_DECREF(methodname); + return NULL; +} + +#include +static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) { + size_t len = strlen(s); + if (unlikely(len > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, "byte string is too long"); + return -1; + } + return (Py_ssize_t) len; +} +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return __Pyx_PyUnicode_FromStringAndSize(c_str, len); +} +static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return PyByteArray_FromStringAndSize(c_str, len); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if CYTHON_COMPILING_IN_LIMITED_API + { + const char* result; + Py_ssize_t unicode_length; + CYTHON_MAYBE_UNUSED_VAR(unicode_length); // only for __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + #if __PYX_LIMITED_VERSION_HEX < 0x030A0000 + if (unlikely(PyArg_Parse(o, "s#", &result, length) < 0)) return NULL; + #else + result = PyUnicode_AsUTF8AndSize(o, length); + #endif + #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + unicode_length = PyUnicode_GetLength(o); + if (unlikely(unicode_length < 0)) return NULL; + 
if (unlikely(unicode_length != *length)) { + PyUnicode_AsASCIIString(o); + return NULL; + } + #endif + return result; + } +#else +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +#endif +} +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 + if (PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif + if (PyByteArray_Check(o)) { +#if (CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS) || (CYTHON_COMPILING_IN_PYPY && (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))) + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); +#else + *length = PyByteArray_Size(o); + if (*length == -1) return NULL; + return PyByteArray_AsString(o); +#endif + } else + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_LongWrongResultType(PyObject* result) { + __Pyx_TypeName result_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(result)); + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). 
" + "The ability to return an instance of a strict subclass of int is deprecated, " + "and may be removed in a future version of Python.", + result_type_name)) { + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; + } + __Pyx_DECREF_TypeName(result_type_name); + return result; + } + PyErr_Format(PyExc_TypeError, + "__int__ returned non-int (type " __Pyx_FMT_TYPENAME ")", + result_type_name); + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + PyObject *res = NULL; + if (likely(PyLong_Check(x))) + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + if (likely(m && m->nb_int)) { + res = m->nb_int(x); + } +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Long(x); + } +#endif + if (likely(res)) { + if (unlikely(!PyLong_CheckExact(res))) { + return __Pyx_PyNumber_LongWrongResultType(res); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(__Pyx_PyLong_IsCompact(b))) { + return __Pyx_PyLong_CompactValue(b); + } else { + const digit* digits = __Pyx_PyLong_Digits(b); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyLong_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { + if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { + return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); + } else { + Py_ssize_t ival; + PyObject *x; + x = PyNumber_Index(o); + if (!x) return -1; + ival = PyLong_AsLong(x); + Py_DECREF(x); + return ival; + } +} +static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b) { + CYTHON_UNUSED_VAR(b); + return __Pyx_NewRef(Py_None); +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return __Pyx_NewRef(b ? 
Py_True: Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t ival) {
+    return PyLong_FromSize_t(ival);
+}
+
+
+/* MultiPhaseInitModuleState */
+#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE
+#ifndef CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
+#if (CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX >= 0x030C0000)
+    #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 1
+#else
+    #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 0
+#endif
+#endif
+#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE && !CYTHON_ATOMICS
+#error "Module state with PEP489 requires atomics. Currently that's one of\
+ C11, C++11, gcc atomic intrinsics or MSVC atomic intrinsics"
+#endif
+#if !CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
+#define __Pyx_ModuleStateLookup_Lock()
+#define __Pyx_ModuleStateLookup_Unlock()
+#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d0000
+static PyMutex __Pyx_ModuleStateLookup_mutex = {0};
+#define __Pyx_ModuleStateLookup_Lock() PyMutex_Lock(&__Pyx_ModuleStateLookup_mutex)
+#define __Pyx_ModuleStateLookup_Unlock() PyMutex_Unlock(&__Pyx_ModuleStateLookup_mutex)
+#elif defined(__cplusplus) && __cplusplus >= 201103L
+#include <mutex>
+static std::mutex __Pyx_ModuleStateLookup_mutex;
+#define __Pyx_ModuleStateLookup_Lock() __Pyx_ModuleStateLookup_mutex.lock()
+#define __Pyx_ModuleStateLookup_Unlock() __Pyx_ModuleStateLookup_mutex.unlock()
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201112L) && !defined(__STDC_NO_THREADS__)
+#include <threads.h>
+static mtx_t __Pyx_ModuleStateLookup_mutex;
+static once_flag __Pyx_ModuleStateLookup_mutex_once_flag = ONCE_FLAG_INIT;
+static void __Pyx_ModuleStateLookup_initialize_mutex(void) {
+    mtx_init(&__Pyx_ModuleStateLookup_mutex, mtx_plain);
+}
+#define __Pyx_ModuleStateLookup_Lock()\
+    call_once(&__Pyx_ModuleStateLookup_mutex_once_flag, __Pyx_ModuleStateLookup_initialize_mutex);\
+    mtx_lock(&__Pyx_ModuleStateLookup_mutex)
+#define __Pyx_ModuleStateLookup_Unlock() mtx_unlock(&__Pyx_ModuleStateLookup_mutex)
+#elif defined(HAVE_PTHREAD_H)
+#include <pthread.h>
+static pthread_mutex_t __Pyx_ModuleStateLookup_mutex = PTHREAD_MUTEX_INITIALIZER;
+#define __Pyx_ModuleStateLookup_Lock() pthread_mutex_lock(&__Pyx_ModuleStateLookup_mutex)
+#define __Pyx_ModuleStateLookup_Unlock() pthread_mutex_unlock(&__Pyx_ModuleStateLookup_mutex)
+#elif defined(_WIN32)
+#include <windows.h> // synchapi.h on its own doesn't work
+static SRWLOCK __Pyx_ModuleStateLookup_mutex = SRWLOCK_INIT;
+#define __Pyx_ModuleStateLookup_Lock() AcquireSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
+#define __Pyx_ModuleStateLookup_Unlock() ReleaseSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
+#else
+#error "No suitable lock available for CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE.\
+ Requires C standard >= C11, or C++ standard >= C++11,\
+ or pthreads, or the Windows 32 API, or Python >= 3.13."
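+/* The ladder above picks the cheapest available lock for guarding the
+ * interpreter-id -> module table, in order of preference: no-op stubs when
+ * thread safety is not required, PyMutex on CPython 3.13+, std::mutex under
+ * C++11, C11 <threads.h> (initialised once via call_once), pthreads, and a
+ * Win32 SRWLOCK as the last resort.  Every user follows the same pattern:
+ *
+ *     __Pyx_ModuleStateLookup_Lock();
+ *     ... read or mutate __Pyx_ModuleStateLookup_data ...
+ *     __Pyx_ModuleStateLookup_Unlock();
+ */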
+#endif +typedef struct { + int64_t id; + PyObject *module; +} __Pyx_InterpreterIdAndModule; +typedef struct { + char interpreter_id_as_index; + Py_ssize_t count; + Py_ssize_t allocated; + __Pyx_InterpreterIdAndModule table[1]; +} __Pyx_ModuleStateLookupData; +#define __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE 32 +#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE +static __pyx_atomic_int_type __Pyx_ModuleStateLookup_read_counter = 0; +#endif +#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE +static __pyx_atomic_ptr_type __Pyx_ModuleStateLookup_data = 0; +#else +static __Pyx_ModuleStateLookupData* __Pyx_ModuleStateLookup_data = NULL; +#endif +static __Pyx_InterpreterIdAndModule* __Pyx_State_FindModuleStateLookupTableLowerBound( + __Pyx_InterpreterIdAndModule* table, + Py_ssize_t count, + int64_t interpreterId) { + __Pyx_InterpreterIdAndModule* begin = table; + __Pyx_InterpreterIdAndModule* end = begin + count; + if (begin->id == interpreterId) { + return begin; + } + while ((end - begin) > __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) { + __Pyx_InterpreterIdAndModule* halfway = begin + (end - begin)/2; + if (halfway->id == interpreterId) { + return halfway; + } + if (halfway->id < interpreterId) { + begin = halfway; + } else { + end = halfway; + } + } + for (; begin < end; ++begin) { + if (begin->id >= interpreterId) return begin; + } + return begin; +} +static PyObject *__Pyx_State_FindModule(CYTHON_UNUSED void* dummy) { + int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get()); + if (interpreter_id == -1) return NULL; +#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE + __Pyx_ModuleStateLookupData* data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data); + { + __pyx_atomic_incr_acq_rel(&__Pyx_ModuleStateLookup_read_counter); + if (likely(data)) { + __Pyx_ModuleStateLookupData* new_data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_acquire(&__Pyx_ModuleStateLookup_data); + if (likely(data == new_data)) { + goto read_finished; + } + } + __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter); + __Pyx_ModuleStateLookup_Lock(); + __pyx_atomic_incr_relaxed(&__Pyx_ModuleStateLookup_read_counter); + data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data); + __Pyx_ModuleStateLookup_Unlock(); + } + read_finished:; +#else + __Pyx_ModuleStateLookupData* data = __Pyx_ModuleStateLookup_data; +#endif + __Pyx_InterpreterIdAndModule* found = NULL; + if (unlikely(!data)) goto end; + if (data->interpreter_id_as_index) { + if (interpreter_id < data->count) { + found = data->table+interpreter_id; + } + } else { + found = __Pyx_State_FindModuleStateLookupTableLowerBound( + data->table, data->count, interpreter_id); + } + end: + { + PyObject *result=NULL; + if (found && found->id == interpreter_id) { + result = found->module; + } +#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE + __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter); +#endif + return result; + } +} +#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE +static void __Pyx_ModuleStateLookup_wait_until_no_readers(void) { + while (__pyx_atomic_load(&__Pyx_ModuleStateLookup_read_counter) != 0); +} +#else +#define __Pyx_ModuleStateLookup_wait_until_no_readers() +#endif +static int __Pyx_State_AddModuleInterpIdAsIndex(__Pyx_ModuleStateLookupData **old_data, PyObject* module, int64_t interpreter_id) { + Py_ssize_t to_allocate = (*old_data)->allocated; + while (to_allocate <= interpreter_id) { + if (to_allocate == 0) to_allocate = 1; + else to_allocate 
*= 2;
+    }
+    __Pyx_ModuleStateLookupData *new_data = *old_data;
+    if (to_allocate != (*old_data)->allocated) {
+        new_data = (__Pyx_ModuleStateLookupData *)realloc(
+            *old_data,
+            sizeof(__Pyx_ModuleStateLookupData)+(to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
+        if (!new_data) {
+            PyErr_NoMemory();
+            return -1;
+        }
+        for (Py_ssize_t i = new_data->allocated; i < to_allocate; ++i) {
+            new_data->table[i].id = i;
+            new_data->table[i].module = NULL;
+        }
+        new_data->allocated = to_allocate;
+    }
+    new_data->table[interpreter_id].module = module;
+    if (new_data->count < interpreter_id+1) {
+        new_data->count = interpreter_id+1;
+    }
+    *old_data = new_data;
+    return 0;
+}
+static void __Pyx_State_ConvertFromInterpIdAsIndex(__Pyx_ModuleStateLookupData *data) {
+    __Pyx_InterpreterIdAndModule *read = data->table;
+    __Pyx_InterpreterIdAndModule *write = data->table;
+    __Pyx_InterpreterIdAndModule *end = read + data->count;
+    for (; read < end; ++read) {
+        if (read->module) {
+            write->id = read->id;
+            write->module = read->module;
+            ++write;
+        }
+    }
+    data->count = write - data->table;
+    for (; write < end; ++write) {
+        write->id = 0;
+        write->module = NULL;
+    }
+    data->interpreter_id_as_index = 0;
+}
+static int __Pyx_State_AddModule(PyObject* module, CYTHON_UNUSED void* dummy) {
+    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
+    if (interpreter_id == -1) return -1;
+    int result = 0;
+    __Pyx_ModuleStateLookup_Lock();
+#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
+    __Pyx_ModuleStateLookupData *old_data = (__Pyx_ModuleStateLookupData *)
+        __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
+#else
+    __Pyx_ModuleStateLookupData *old_data = __Pyx_ModuleStateLookup_data;
+#endif
+    __Pyx_ModuleStateLookupData *new_data = old_data;
+    if (!new_data) {
+        new_data = (__Pyx_ModuleStateLookupData *)calloc(1, sizeof(__Pyx_ModuleStateLookupData));
+        if (!new_data) {
+            result = -1;
+            PyErr_NoMemory();
+            goto end;
+        }
+        new_data->allocated = 1;
+        new_data->interpreter_id_as_index = 1;
+    }
+    __Pyx_ModuleStateLookup_wait_until_no_readers();
+    if (new_data->interpreter_id_as_index) {
+        if (interpreter_id < __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) {
+            result = __Pyx_State_AddModuleInterpIdAsIndex(&new_data, module, interpreter_id);
+            goto end;
+        }
+        __Pyx_State_ConvertFromInterpIdAsIndex(new_data);
+    }
+    {
+        Py_ssize_t insert_at = 0;
+        {
+            __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
+                new_data->table, new_data->count, interpreter_id);
+            assert(lower_bound);
+            insert_at = lower_bound - new_data->table;
+            if (unlikely(insert_at < new_data->count && lower_bound->id == interpreter_id)) {
+                lower_bound->module = module;
+                goto end; // already in table, nothing more to do
+            }
+        }
+        if (new_data->count+1 >= new_data->allocated) {
+            Py_ssize_t to_allocate = (new_data->count+1)*2;
+            new_data =
+                (__Pyx_ModuleStateLookupData*)realloc(
+                    new_data,
+                    sizeof(__Pyx_ModuleStateLookupData) +
+                        (to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
+            if (!new_data) {
+                result = -1;
+                new_data = old_data;
+                PyErr_NoMemory();
+                goto end;
+            }
+            new_data->allocated = to_allocate;
+        }
+        ++new_data->count;
+        int64_t last_id = interpreter_id;
+        PyObject *last_module = module;
+        for (Py_ssize_t i=insert_at; i < new_data->count; ++i) {
+            int64_t current_id = new_data->table[i].id;
+            new_data->table[i].id = last_id;
+            last_id = current_id;
+            PyObject *current_module = new_data->table[i].module;
+            new_data->table[i].module = last_module;
+            last_module = current_module;
+        }
+    }
+    end:
+#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
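+    /* Republish the table: it was atomically swapped out (to NULL) when this
+     * function started, so lock-free readers that raced with the update saw a
+     * NULL pointer and fell back to re-reading it under the lock.  The
+     * exchange below makes the possibly reallocated table visible again. */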
+    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, new_data);
+#else
+    __Pyx_ModuleStateLookup_data = new_data;
+#endif
+    __Pyx_ModuleStateLookup_Unlock();
+    return result;
+}
+static int __Pyx_State_RemoveModule(CYTHON_UNUSED void* dummy) {
+    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
+    if (interpreter_id == -1) return -1;
+    __Pyx_ModuleStateLookup_Lock();
+#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
+    __Pyx_ModuleStateLookupData *data = (__Pyx_ModuleStateLookupData *)
+        __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
+#else
+    __Pyx_ModuleStateLookupData *data = __Pyx_ModuleStateLookup_data;
+#endif
+    if (data->interpreter_id_as_index) {
+        if (interpreter_id < data->count) {
+            data->table[interpreter_id].module = NULL;
+        }
+        goto done;
+    }
+    {
+        __Pyx_ModuleStateLookup_wait_until_no_readers();
+        __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
+            data->table, data->count, interpreter_id);
+        if (!lower_bound) goto done;
+        if (lower_bound->id != interpreter_id) goto done;
+        __Pyx_InterpreterIdAndModule *end = data->table+data->count;
+        for (; lower_bound < end-1; ++lower_bound) {
+            lower_bound->id = (lower_bound+1)->id;
+            lower_bound->module = (lower_bound+1)->module;
+        }
+    }
+    --data->count;
+    if (data->count == 0) {
+        free(data);
+        data = NULL;
+    }
+    done:
+#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
+    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, data);
+#else
+    __Pyx_ModuleStateLookup_data = data;
+#endif
+    __Pyx_ModuleStateLookup_Unlock();
+    return 0;
+}
+#endif
+
+/* #### Code section: utility_code_pragmas_end ### */
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+
+
+/* #### Code section: end ### */
+#endif /* Py_PYTHON_H */
diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.py
new file mode 100644
index 0000000000000000000000000000000000000000..150c03fb4a06d7d5e875a9450c958693c601a6fa
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.py
@@ -0,0 +1,563 @@
+# cython: language_level=3
+# distutils: define_macros=CYTHON_TRACE_NOGIL=1
+
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+    import cython
+except (AttributeError, ImportError):
+    # if cython not installed, use mock module with no-op decorators and types
+    from fontTools.misc import cython
+COMPILED = cython.compiled
+
+import math
+
+from .errors import Error as Cu2QuError, ApproxNotFoundError
+
+
+__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
+
+MAX_N = 100
+
+NAN = float("NaN")
+
+
+@cython.cfunc
+@cython.inline
+@cython.returns(cython.double)
+@cython.locals(v1=cython.complex, v2=cython.complex, result=cython.double)
+def dot(v1, v2):
+    """Return the dot product of two vectors.
+
+    Args:
+        v1 (complex): First vector.
+        v2 (complex): Second vector.
+
+    Returns:
+        double: Dot product.
+ """ + result = (v1 * v2.conjugate()).real + # When vectors are perpendicular (i.e. dot product is 0), the above expression may + # yield slightly different results when running in pure Python vs C/Cython, + # both of which are correct within IEEE-754 floating-point precision. + # It's probably due to the different order of operations and roundings in each + # implementation. Because we are using the result in a denominator and catching + # ZeroDivisionError (see `calc_intersect`), it's best to normalize the result here. + if abs(result) < 1e-15: + result = 0.0 + return result + + +@cython.cfunc +@cython.locals(z=cython.complex, den=cython.double) +@cython.locals(zr=cython.double, zi=cython.double) +def _complex_div_by_real(z, den): + """Divide complex by real using Python's method (two separate divisions). + + This ensures bit-exact compatibility with Python's complex division, + avoiding C's multiply-by-reciprocal optimization that can cause 1 ULP differences + on some platforms/compilers (e.g. clang on macOS arm64). + + https://github.com/fonttools/fonttools/issues/3928 + """ + zr = z.real + zi = z.imag + return complex(zr / den, zi / den) + + +@cython.cfunc +@cython.inline +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals( + _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex +) +def calc_cubic_points(a, b, c, d): + _1 = d + _2 = _complex_div_by_real(c, 3.0) + d + _3 = _complex_div_by_real(b + c, 3.0) + _2 + _4 = a + d + c + b + return _1, _2, _3, _4 + + +@cython.cfunc +@cython.inline +@cython.locals( + p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex +) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +def calc_cubic_parameters(p0, p1, p2, p3): + c = (p1 - p0) * 3.0 + b = (p2 - p1) * 3.0 - c + d = p0 + a = p3 - d - c - b + return a, b, c, d + + +@cython.cfunc +@cython.inline +@cython.locals( + p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex +) +def split_cubic_into_n_iter(p0, p1, p2, p3, n): + """Split a cubic Bezier into n equal parts. + + Splits the curve into `n` equal parts by curve time. + (t=0..1/n, t=1/n..2/n, ...) + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + An iterator yielding the control points (four complex values) of the + subcurves. 
+ """ + # Hand-coded special-cases + if n == 2: + return iter(split_cubic_into_two(p0, p1, p2, p3)) + if n == 3: + return iter(split_cubic_into_three(p0, p1, p2, p3)) + if n == 4: + a, b = split_cubic_into_two(p0, p1, p2, p3) + return iter( + split_cubic_into_two(a[0], a[1], a[2], a[3]) + + split_cubic_into_two(b[0], b[1], b[2], b[3]) + ) + if n == 6: + a, b = split_cubic_into_two(p0, p1, p2, p3) + return iter( + split_cubic_into_three(a[0], a[1], a[2], a[3]) + + split_cubic_into_three(b[0], b[1], b[2], b[3]) + ) + + return _split_cubic_into_n_gen(p0, p1, p2, p3, n) + + +@cython.locals( + p0=cython.complex, + p1=cython.complex, + p2=cython.complex, + p3=cython.complex, + n=cython.int, +) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals( + dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int +) +@cython.locals( + a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex +) +def _split_cubic_into_n_gen(p0, p1, p2, p3, n): + a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) + dt = 1 / n + delta_2 = dt * dt + delta_3 = dt * delta_2 + for i in range(n): + t1 = i * dt + t1_2 = t1 * t1 + # calc new a, b, c and d + a1 = a * delta_3 + b1 = (3 * a * t1 + b) * delta_2 + c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt + d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d + yield calc_cubic_points(a1, b1, c1, d1) + + +@cython.cfunc +@cython.inline +@cython.locals( + p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex +) +@cython.locals(mid=cython.complex, deriv3=cython.complex) +def split_cubic_into_two(p0, p1, p2, p3): + """Split a cubic Bezier into two equal parts. + + Splits the curve into two equal parts at t = 0.5 + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + tuple: Two cubic Beziers (each expressed as a tuple of four complex + values). + """ + mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + deriv3 = (p3 + p2 - p1 - p0) * 0.125 + return ( + (p0, (p0 + p1) * 0.5, mid - deriv3, mid), + (mid, mid + deriv3, (p2 + p3) * 0.5, p3), + ) + + +@cython.cfunc +@cython.inline +@cython.locals( + p0=cython.complex, + p1=cython.complex, + p2=cython.complex, + p3=cython.complex, +) +@cython.locals( + mid1=cython.complex, + deriv1=cython.complex, + mid2=cython.complex, + deriv2=cython.complex, +) +def split_cubic_into_three(p0, p1, p2, p3): + """Split a cubic Bezier into three equal parts. + + Splits the curve into three equal parts at t = 1/3 and t = 2/3 + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + tuple: Three cubic Beziers (each expressed as a tuple of four complex + values). 
+ """ + mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27) + deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) + mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) + deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) + return ( + (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), + (mid1, mid1 + deriv1, mid2 - deriv2, mid2), + (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), + ) + + +@cython.cfunc +@cython.inline +@cython.returns(cython.complex) +@cython.locals( + t=cython.double, + p0=cython.complex, + p1=cython.complex, + p2=cython.complex, + p3=cython.complex, +) +@cython.locals(_p1=cython.complex, _p2=cython.complex) +def cubic_approx_control(t, p0, p1, p2, p3): + """Approximate a cubic Bezier using a quadratic one. + + Args: + t (double): Position of control point. + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + complex: Location of candidate control point on quadratic curve. + """ + _p1 = p0 + (p1 - p0) * 1.5 + _p2 = p3 + (p2 - p3) * 1.5 + return _p1 + (_p2 - _p1) * t + + +@cython.cfunc +@cython.inline +@cython.returns(cython.complex) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double) +def calc_intersect(a, b, c, d): + """Calculate the intersection of two lines. + + Args: + a (complex): Start point of first line. + b (complex): End point of first line. + c (complex): Start point of second line. + d (complex): End point of second line. + + Returns: + complex: Location of intersection if one present, ``complex(NaN,NaN)`` + if no intersection was found. + """ + ab = b - a + cd = d - c + p = ab * 1j + try: + h = dot(p, a - c) / dot(p, cd) + except ZeroDivisionError: + # if 3 or 4 points are equal, we do have an intersection despite the zero-div: + # return one of the off-curves so that the algorithm can attempt a one-curve + # solution if it's within tolerance: + # https://github.com/linebender/kurbo/pull/484 + if b == c and (a == b or c == d): + return b + return complex(NAN, NAN) + return c + cd * h + + +@cython.cfunc +@cython.returns(cython.int) +@cython.locals( + tolerance=cython.double, + p0=cython.complex, + p1=cython.complex, + p2=cython.complex, + p3=cython.complex, +) +@cython.locals(mid=cython.complex, deriv3=cython.complex) +def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance): + """Check if a cubic Bezier lies within a given distance of the origin. + + "Origin" means *the* origin (0,0), not the start of the curve. Note that no + checks are made on the start and end positions of the curve; this function + only checks the inside of the curve. + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + tolerance (double): Distance from origin. + + Returns: + bool: True if the cubic Bezier ``p`` entirely lies within a distance + ``tolerance`` of the origin, False otherwise. + """ + # First check p2 then p1, as p2 has higher error early on. + if abs(p2) <= tolerance and abs(p1) <= tolerance: + return True + + # Split. 
+ mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + if abs(mid) > tolerance: + return False + deriv3 = (p3 + p2 - p1 - p0) * 0.125 + return cubic_farthest_fit_inside( + p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance + ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance) + + +@cython.cfunc +@cython.inline +@cython.locals(tolerance=cython.double) +@cython.locals( + q1=cython.complex, + c0=cython.complex, + c1=cython.complex, + c2=cython.complex, + c3=cython.complex, +) +def cubic_approx_quadratic(cubic, tolerance): + """Approximate a cubic Bezier with a single quadratic within a given tolerance. + + Args: + cubic (sequence): Four complex numbers representing control points of + the cubic Bezier curve. + tolerance (double): Permitted deviation from the original curve. + + Returns: + Three complex numbers representing control points of the quadratic + curve if it fits within the given tolerance, or ``None`` if no suitable + curve could be calculated. + """ + + q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) + if math.isnan(q1.imag): + return None + c0 = cubic[0] + c3 = cubic[3] + c1 = c0 + (q1 - c0) * (2 / 3) + c2 = c3 + (q1 - c3) * (2 / 3) + if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): + return None + return c0, q1, c3 + + +@cython.cfunc +@cython.locals(n=cython.int, tolerance=cython.double) +@cython.locals(i=cython.int) +@cython.locals(all_quadratic=cython.int) +@cython.locals( + c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex +) +@cython.locals( + q0=cython.complex, + q1=cython.complex, + next_q1=cython.complex, + q2=cython.complex, + d1=cython.complex, +) +def cubic_approx_spline(cubic, n, tolerance, all_quadratic): + """Approximate a cubic Bezier curve with a spline of n quadratics. + + Args: + cubic (sequence): Four complex numbers representing control points of + the cubic Bezier curve. + n (int): Number of quadratic Bezier curves in the spline. + tolerance (double): Permitted deviation from the original curve. + + Returns: + A list of ``n+2`` complex numbers, representing control points of the + quadratic spline if it fits within the given tolerance, or ``None`` if + no suitable spline could be calculated. + """ + + if n == 1: + return cubic_approx_quadratic(cubic, tolerance) + if n == 2 and all_quadratic == False: + return cubic + + cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) + + # calculate the spline of quadratics and check errors at the same time. 
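+    # Walk the n sub-cubics once: take a candidate quadratic control point for
+    # each, derive the implied on-curve points as midpoints between neighbouring
+    # candidates, and bail out early if an end-point delta or the in-between
+    # error exceeds the tolerance.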
+ next_cubic = next(cubics) + next_q1 = cubic_approx_control( + 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + ) + q2 = cubic[0] + d1 = 0j + spline = [cubic[0], next_q1] + for i in range(1, n + 1): + # Current cubic to convert + c0, c1, c2, c3 = next_cubic + + # Current quadratic approximation of current cubic + q0 = q2 + q1 = next_q1 + if i < n: + next_cubic = next(cubics) + next_q1 = cubic_approx_control( + i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] + ) + spline.append(next_q1) + q2 = (q1 + next_q1) * 0.5 + else: + q2 = c3 + + # End-point deltas + d0 = d1 + d1 = q2 - c3 + + if abs(d1) > tolerance or not cubic_farthest_fit_inside( + d0, + q0 + (q1 - q0) * (2 / 3) - c1, + q2 + (q1 - q2) * (2 / 3) - c2, + d1, + tolerance, + ): + return None + spline.append(cubic[3]) + + return spline + + +@cython.locals(max_err=cython.double) +@cython.locals(n=cython.int) +@cython.locals(all_quadratic=cython.int) +def curve_to_quadratic(curve, max_err, all_quadratic=True): + """Approximate a cubic Bezier curve with a spline of n quadratics. + + Args: + curve (sequence): Four 2D tuples representing control points of + the cubic Bezier curve. + max_err (double): Permitted deviation from the original curve. + all_quadratic (bool): If True (default) returned value is a + quadratic spline. If False, it's either a single quadratic + curve or a single cubic curve. + + Returns: + If all_quadratic is True: A list of 2D tuples, representing + control points of the quadratic spline if it fits within the + given tolerance, or ``None`` if no suitable spline could be + calculated. + + If all_quadratic is False: Either a quadratic curve (if length + of output is 3), or a cubic curve (if length of output is 4). + """ + + curve = [complex(*p) for p in curve] + + for n in range(1, MAX_N + 1): + spline = cubic_approx_spline(curve, n, max_err, all_quadratic) + if spline is not None: + # done. go home + return [(s.real, s.imag) for s in spline] + + raise ApproxNotFoundError(curve) + + +@cython.locals(l=cython.int, last_i=cython.int, i=cython.int) +@cython.locals(all_quadratic=cython.int) +def curves_to_quadratic(curves, max_errors, all_quadratic=True): + """Return quadratic Bezier splines approximating the input cubic Beziers. + + Args: + curves: A sequence of *n* curves, each curve being a sequence of four + 2D tuples. + max_errors: A sequence of *n* floats representing the maximum permissible + deviation from each of the cubic Bezier curves. + all_quadratic (bool): If True (default) returned values are a + quadratic spline. If False, they are either a single quadratic + curve or a single cubic curve. + + Example:: + + >>> curves_to_quadratic( [ + ... [ (50,50), (100,100), (150,100), (200,50) ], + ... [ (75,50), (120,100), (150,75), (200,60) ] + ... ], [1,1] ) + [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]] + + The returned splines have "implied oncurve points" suitable for use in + TrueType ``glyf`` outlines - i.e. in the first spline returned above, + the first quadratic segment runs from (50,50) to + ( (75 + 125)/2 , (75 + 91.666..)/2 ) = (100, 83.333...). + + Returns: + If all_quadratic is True, a list of splines, each spline being a list + of 2D tuples. + + If all_quadratic is False, a list of curves, each curve being a quadratic + (length 3), or cubic (length 4).
+ + Raises: + fontTools.cu2qu.errors.ApproxNotFoundError: if no suitable approximation + can be found for all curves with the given parameters. + """ + + curves = [[complex(*p) for p in curve] for curve in curves] + assert len(max_errors) == len(curves) + + l = len(curves) + splines = [None] * l + last_i = i = 0 + n = 1 + while True: + spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) + if spline is None: + if n == MAX_N: + break + n += 1 + last_i = i + continue + splines[i] = spline + i = (i + 1) % l + if i == last_i: + # done. go home + return [[(s.real, s.imag) for s in spline] for spline in splines] + + raise ApproxNotFoundError(curves) diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/errors.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..fa3dc42937131c5db54890dde8f519b15f5d0ff1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/cu2qu/errors.py @@ -0,0 +1,77 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Error(Exception): + """Base Cu2Qu exception class for all other errors.""" + + +class ApproxNotFoundError(Error): + def __init__(self, curve): + message = "no approximation found: %s" % curve + super().__init__(message) + self.curve = curve + + +class UnequalZipLengthsError(Error): + pass + + +class IncompatibleGlyphsError(Error): + def __init__(self, glyphs): + assert len(glyphs) > 1 + self.glyphs = glyphs + names = set(repr(g.name) for g in glyphs) + if len(names) > 1: + self.combined_name = "{%s}" % ", ".join(sorted(names)) + else: + self.combined_name = names.pop() + + def __repr__(self): + return "<%s %s>" % (type(self).__name__, self.combined_name) + + +class IncompatibleSegmentNumberError(IncompatibleGlyphsError): + def __str__(self): + return "Glyphs named %s have a different number of segments" % ( + self.combined_name + ) + + +class IncompatibleSegmentTypesError(IncompatibleGlyphsError): + def __init__(self, glyphs, segments): + IncompatibleGlyphsError.__init__(self, glyphs) + self.segments = segments + + def __str__(self): + lines = [] + ndigits = len(str(max(self.segments))) + for i, tags in sorted(self.segments.items()): + lines.append( + "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags)) + ) + return "Glyphs named %s have incompatible segment types:\n %s" % ( + self.combined_name, + "\n ".join(lines), + ) + + +class IncompatibleFontsError(Error): + def __init__(self, glyph_errors): + self.glyph_errors = glyph_errors + + def __str__(self): + return "fonts contain incompatible glyphs: %s" % ( + ", ".join(repr(g) for g in sorted(self.glyph_errors.keys())) + ) diff --git a/py311/lib/python3.11/site-packages/fontTools/cu2qu/ufo.py b/py311/lib/python3.11/site-packages/fontTools/cu2qu/ufo.py new file mode 100644 index 0000000000000000000000000000000000000000..db9a1b0384bb9042ffb991e823945395b1959769 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/fontTools/cu2qu/ufo.py @@ -0,0 +1,363 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Converts cubic bezier curves to quadratic splines. + +Conversion is performed such that the quadratic splines keep the same end-curve +tangents as the original cubics. The approach is iterative, increasing the +number of segments for a spline until the error gets below a bound. + +Respective curves from multiple fonts will be converted at once to ensure that +the resulting splines are interpolation-compatible. +""" + +import logging +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.pointPen import PointToSegmentPen +from fontTools.pens.reverseContourPen import ReverseContourPen + +from . import curves_to_quadratic +from .errors import ( + UnequalZipLengthsError, + IncompatibleSegmentNumberError, + IncompatibleSegmentTypesError, + IncompatibleGlyphsError, + IncompatibleFontsError, +) + + +__all__ = ["fonts_to_quadratic", "font_to_quadratic"] + +# The default approximation error below is a relative value (1/1000 of the EM square). +# Later on, we convert it to absolute font units by multiplying it by a font's UPEM +# (see fonts_to_quadratic). +DEFAULT_MAX_ERR = 0.001 +CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type" + +logger = logging.getLogger(__name__) + + +_zip = zip + + +def zip(*args): + """Ensure each argument to zip has the same length. Also make sure a list is + returned for python 2/3 compatibility. + """ + + if len(set(len(a) for a in args)) != 1: + raise UnequalZipLengthsError(*args) + return list(_zip(*args)) + + +class GetSegmentsPen(AbstractPen): + """Pen to collect segments into lists of points for conversion. + + Curves always include their initial on-curve point, so some points are + duplicated between segments. + """ + + def __init__(self): + self._last_pt = None + self.segments = [] + + def _add_segment(self, tag, *args): + if tag in ["move", "line", "qcurve", "curve"]: + self._last_pt = args[-1] + self.segments.append((tag, args)) + + def moveTo(self, pt): + self._add_segment("move", pt) + + def lineTo(self, pt): + self._add_segment("line", pt) + + def qCurveTo(self, *points): + self._add_segment("qcurve", self._last_pt, *points) + + def curveTo(self, *points): + self._add_segment("curve", self._last_pt, *points) + + def closePath(self): + self._add_segment("close") + + def endPath(self): + self._add_segment("end") + + def addComponent(self, glyphName, transformation): + pass + + +def _get_segments(glyph): + """Get a glyph's segments as extracted by GetSegmentsPen.""" + + pen = GetSegmentsPen() + # glyph.draw(pen) + # We can't simply draw the glyph with the pen, but we must initialize the + # PointToSegmentPen explicitly with outputImpliedClosingLine=True. + # By default PointToSegmentPen does not outputImpliedClosingLine -- unless + # last and first point on closed contour are duplicated. 
Because we are + # converting multiple glyphs at the same time, we want to make sure + # this function returns the same number of segments, whether or not + # the last and first point overlap. + # https://github.com/googlefonts/fontmake/issues/572 + # https://github.com/fonttools/fonttools/pull/1720 + pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True) + glyph.drawPoints(pointPen) + return pen.segments + + +def _set_segments(glyph, segments, reverse_direction): + """Draw segments as extracted by GetSegmentsPen back to a glyph.""" + + glyph.clearContours() + pen = glyph.getPen() + if reverse_direction: + pen = ReverseContourPen(pen) + for tag, args in segments: + if tag == "move": + pen.moveTo(*args) + elif tag == "line": + pen.lineTo(*args) + elif tag == "curve": + pen.curveTo(*args[1:]) + elif tag == "qcurve": + pen.qCurveTo(*args[1:]) + elif tag == "close": + pen.closePath() + elif tag == "end": + pen.endPath() + else: + raise AssertionError('Unhandled segment type "%s"' % tag) + + +def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True): + """Return quadratic approximations of cubic segments.""" + + assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert" + + new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic) + n = len(new_points[0]) + assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly" + + spline_length = str(n - 2) + stats[spline_length] = stats.get(spline_length, 0) + 1 + + if all_quadratic or n == 3: + return [("qcurve", p) for p in new_points] + else: + return [("curve", p) for p in new_points] + + +def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True): + """Do the actual conversion of a set of compatible glyphs, after arguments + have been set up. + + Empty glyphs (without contours) are ignored and passed through unchanged. + + Return True if the glyphs were modified, else return False. + """ + + # Skip empty glyphs (with zero contours) + non_empty_indices = [i for i, g in enumerate(glyphs) if len(g) > 0] + if not non_empty_indices: + return False + + glyphs = [glyphs[i] for i in non_empty_indices] + max_err = [max_err[i] for i in non_empty_indices] + + try: + segments_by_location = zip(*[_get_segments(g) for g in glyphs]) + except UnequalZipLengthsError: + raise IncompatibleSegmentNumberError(glyphs) + if not any(segments_by_location): + return False + + # always modify input glyphs if reverse_direction is True + glyphs_modified = reverse_direction + + new_segments_by_location = [] + incompatible = {} + for i, segments in enumerate(segments_by_location): + tag = segments[0][0] + if not all(s[0] == tag for s in segments[1:]): + incompatible[i] = [s[0] for s in segments] + elif tag == "curve": + new_segments = _segments_to_quadratic( + segments, max_err, stats, all_quadratic + ) + if all_quadratic or new_segments != segments: + glyphs_modified = True + segments = new_segments + new_segments_by_location.append(segments) + + if glyphs_modified: + new_segments_by_glyph = zip(*new_segments_by_location) + for glyph, new_segments in zip(glyphs, new_segments_by_glyph): + _set_segments(glyph, new_segments, reverse_direction) + + if incompatible: + raise IncompatibleSegmentTypesError(glyphs, segments=incompatible) + return glyphs_modified + + +def glyphs_to_quadratic( + glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True +): + """Convert the curves of a set of compatible glyphs to quadratic. 
+ + All curves will be converted to quadratic at once, ensuring interpolation + compatibility. If this is not required, calling glyphs_to_quadratic with one + glyph at a time may yield slightly more optimized results. + + Empty glyphs (without contours) are ignored and passed through unchanged. + + Return True if glyphs were modified, else return False. + + Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines. + """ + if stats is None: + stats = {} + + if not max_err: + # assume 1000 is the default UPEM + max_err = DEFAULT_MAX_ERR * 1000 + + if isinstance(max_err, (list, tuple)): + max_errors = max_err + else: + max_errors = [max_err] * len(glyphs) + assert len(max_errors) == len(glyphs) + + return _glyphs_to_quadratic( + glyphs, max_errors, reverse_direction, stats, all_quadratic + ) + + +def fonts_to_quadratic( + fonts, + max_err_em=None, + max_err=None, + reverse_direction=False, + stats=None, + dump_stats=False, + remember_curve_type=True, + all_quadratic=True, +): + """Convert the curves of a collection of fonts to quadratic. + + All curves will be converted to quadratic at once, ensuring interpolation + compatibility. If this is not required, calling fonts_to_quadratic with one + font at a time may yield slightly more optimized results. + + Empty glyphs (without contours) are ignored and passed through unchanged. + + Return the set of modified glyph names if any, else return an empty set. + + By default, cu2qu stores the curve type in the fonts' lib, under a private + key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert + them again if the curve type is already set to "quadratic". + Setting 'remember_curve_type' to False disables this optimization. + + Raises IncompatibleFontsError if same-named glyphs from different fonts + have non-interpolatable outlines. 
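+
+    Example (illustrative; ``font1`` and ``font2`` stand in for open UFO
+    ``defcon.Font`` objects)::
+
+        modified = fonts_to_quadratic([font1, font2], max_err_em=0.0025)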
+ """ + + if remember_curve_type: + curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts} + if len(curve_types) == 1: + curve_type = next(iter(curve_types)) + if curve_type in ("quadratic", "mixed"): + logger.info("Curves already converted to quadratic") + return False + elif curve_type == "cubic": + pass # keep converting + else: + raise NotImplementedError(curve_type) + elif len(curve_types) > 1: + # going to crash later if they do differ + logger.warning("fonts may contain different curve types") + + if stats is None: + stats = {} + + if max_err_em and max_err: + raise TypeError("Only one of max_err and max_err_em can be specified.") + if not (max_err_em or max_err): + max_err_em = DEFAULT_MAX_ERR + + if isinstance(max_err, (list, tuple)): + assert len(max_err) == len(fonts) + max_errors = max_err + elif max_err: + max_errors = [max_err] * len(fonts) + + if isinstance(max_err_em, (list, tuple)): + assert len(fonts) == len(max_err_em) + max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)] + elif max_err_em: + max_errors = [f.info.unitsPerEm * max_err_em for f in fonts] + + modified = set() + glyph_errors = {} + for name in set().union(*(f.keys() for f in fonts)): + glyphs = [] + cur_max_errors = [] + for font, error in zip(fonts, max_errors): + if name in font: + glyphs.append(font[name]) + cur_max_errors.append(error) + try: + if _glyphs_to_quadratic( + glyphs, cur_max_errors, reverse_direction, stats, all_quadratic + ): + modified.add(name) + except IncompatibleGlyphsError as exc: + logger.error(exc) + glyph_errors[name] = exc + + if glyph_errors: + raise IncompatibleFontsError(glyph_errors) + + if modified and dump_stats: + spline_lengths = sorted(stats.keys()) + logger.info( + "New spline lengths: %s" + % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths)) + ) + + if remember_curve_type: + for font in fonts: + curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic") + new_curve_type = "quadratic" if all_quadratic else "mixed" + if curve_type != new_curve_type: + font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type + return modified + + +def glyph_to_quadratic(glyph, **kwargs): + """Convenience wrapper around glyphs_to_quadratic, for just one glyph. + Return True if the glyph was modified, else return False. + """ + + return glyphs_to_quadratic([glyph], **kwargs) + + +def font_to_quadratic(font, **kwargs): + """Convenience wrapper around fonts_to_quadratic, for just one font. + Return the set of modified glyph names if any, else return empty set. 
+ """ + + return fonts_to_quadratic([font], **kwargs) diff --git a/py311/lib/python3.11/site-packages/fontTools/designspaceLib/__init__.py b/py311/lib/python3.11/site-packages/fontTools/designspaceLib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4782041b43a50559b48f1e5de09cd7a6a614f59b --- /dev/null +++ b/py311/lib/python3.11/site-packages/fontTools/designspaceLib/__init__.py @@ -0,0 +1,3343 @@ +""" + designSpaceDocument + + - Read and write designspace files +""" + +from __future__ import annotations + +import collections +import copy +import itertools +import math +import os +import posixpath +from io import BytesIO, StringIO +from textwrap import indent +from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast + +from fontTools.misc import etree as ET +from fontTools.misc import plistlib +from fontTools.misc.loggingTools import LogMixin +from fontTools.misc.textTools import tobytes, tostr + + +__all__ = [ + "AxisDescriptor", + "AxisLabelDescriptor", + "AxisMappingDescriptor", + "BaseDocReader", + "BaseDocWriter", + "DesignSpaceDocument", + "DesignSpaceDocumentError", + "DiscreteAxisDescriptor", + "InstanceDescriptor", + "LocationLabelDescriptor", + "RangeAxisSubsetDescriptor", + "RuleDescriptor", + "SourceDescriptor", + "ValueAxisSubsetDescriptor", + "VariableFontDescriptor", +] + +# ElementTree allows to find namespace-prefixed elements, but not attributes +# so we have to do it ourselves for 'xml:lang' +XML_NS = "{http://www.w3.org/XML/1998/namespace}" +XML_LANG = XML_NS + "lang" + + +def posix(path): + """Normalize paths using forward slash to work also on Windows.""" + new_path = posixpath.join(*path.split(os.path.sep)) + if path.startswith("/"): + # The above transformation loses absolute paths + new_path = "/" + new_path + elif path.startswith(r"\\"): + # The above transformation loses leading slashes of UNC path mounts + new_path = "//" + new_path + return new_path + + +def posixpath_property(private_name): + """Generate a propery that holds a path always using forward slashes.""" + + def getter(self): + # Normal getter + return getattr(self, private_name) + + def setter(self, value): + # The setter rewrites paths using forward slashes + if value is not None: + value = posix(value) + setattr(self, private_name, value) + + return property(getter, setter) + + +class DesignSpaceDocumentError(Exception): + def __init__(self, msg, obj=None): + self.msg = msg + self.obj = obj + + def __str__(self): + return str(self.msg) + (": %r" % self.obj if self.obj is not None else "") + + +class AsDictMixin(object): + def asdict(self): + d = {} + for attr, value in self.__dict__.items(): + if attr.startswith("_"): + continue + if hasattr(value, "asdict"): + value = value.asdict() + elif isinstance(value, list): + value = [v.asdict() if hasattr(v, "asdict") else v for v in value] + d[attr] = value + return d + + +class SimpleDescriptor(AsDictMixin): + """Containers for a bunch of attributes""" + + # XXX this is ugly. 
The 'print' is inappropriate here, and instead of + # assert, it should simply return True/False + def compare(self, other): + # test if this object contains the same data as the other + for attr in self._attrs: + try: + assert getattr(self, attr) == getattr(other, attr) + except AssertionError: + print( + "failed attribute", + attr, + getattr(self, attr), + "!=", + getattr(other, attr), + ) + + def __repr__(self): + attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs] + attrs = indent("\n".join(attrs), " ") + return f"{self.__class__.__name__}(\n{attrs}\n)" + + +class SourceDescriptor(SimpleDescriptor): + """Simple container for data related to the source + + .. code:: python + + doc = DesignSpaceDocument() + s1 = SourceDescriptor() + s1.path = masterPath1 + s1.name = "master.ufo1" + s1.font = defcon.Font("master.ufo1") + s1.location = dict(weight=0) + s1.familyName = "MasterFamilyName" + s1.styleName = "MasterStyleNameOne" + s1.localisedFamilyName = dict(fr="Caractère") + s1.mutedGlyphNames.append("A") + s1.mutedGlyphNames.append("Z") + doc.addSource(s1) + + """ + + flavor = "source" + _attrs = [ + "filename", + "path", + "name", + "layerName", + "location", + "copyLib", + "copyGroups", + "copyFeatures", + "muteKerning", + "muteInfo", + "mutedGlyphNames", + "familyName", + "styleName", + "localisedFamilyName", + ] + + filename = posixpath_property("_filename") + path = posixpath_property("_path") + + def __init__( + self, + *, + filename=None, + path=None, + font=None, + name=None, + location=None, + designLocation=None, + layerName=None, + familyName=None, + styleName=None, + localisedFamilyName=None, + copyLib=False, + copyInfo=False, + copyGroups=False, + copyFeatures=False, + muteKerning=False, + muteInfo=False, + mutedGlyphNames=None, + ): + self.filename = filename + """string. A relative path to the source file, **as it is in the document**. + + MutatorMath + VarLib. + """ + self.path = path + """The absolute path, calculated from filename.""" + + self.font = font + """Any Python object. Optional. Points to a representation of this + source font that is loaded in memory, as a Python object (e.g. a + ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). + + The default document reader will not fill in this attribute, and the + default writer will not use this attribute. It is up to the user of + ``designspaceLib`` to either load the resource identified by + ``filename`` and store it in this field, or write the contents of + this field to the disk and make ``filename`` point to that. + """ + + self.name = name + """string. Optional. Unique identifier name for this source. + + MutatorMath + varLib. + """ + + self.designLocation = ( + designLocation if designLocation is not None else location or {} + ) + """dict. Axis values for this source, in design space coordinates. + + MutatorMath + varLib. + + This may be only part of the full design location. + See :meth:`getFullDesignLocation()` + + .. versionadded:: 5.0 + """ + + self.layerName = layerName + """string. The name of the layer in the source to look for + outline data. Default ``None`` which means ``foreground``. + """ + self.familyName = familyName + """string. Family name of this source. Though this data + can be extracted from the font, it can be efficient to have it right + here. + + varLib. + """ + self.styleName = styleName + """string. Style name of this source. Though this data + can be extracted from the font, it can be efficient to have it right + here. + + varLib. 
+ """ + self.localisedFamilyName = localisedFamilyName or {} + """dict. A dictionary of localised family name strings, keyed by + language code. + + If present, will be used to build localized names for all instances. + + .. versionadded:: 5.0 + """ + + self.copyLib = copyLib + """bool. Indicates if the contents of the font.lib need to + be copied to the instances. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.copyInfo = copyInfo + """bool. Indicates if the non-interpolating font.info needs + to be copied to the instances. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.copyGroups = copyGroups + """bool. Indicates if the groups need to be copied to the + instances. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.copyFeatures = copyFeatures + """bool. Indicates if the feature text needs to be + copied to the instances. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.muteKerning = muteKerning + """bool. Indicates if the kerning data from this source + needs to be muted (i.e. not be part of the calculations). + + MutatorMath only. + """ + self.muteInfo = muteInfo + """bool. Indicated if the interpolating font.info data for + this source needs to be muted. + + MutatorMath only. + """ + self.mutedGlyphNames = mutedGlyphNames or [] + """list. Glyphnames that need to be muted in the + instances. + + MutatorMath only. + """ + + @property + def location(self): + """dict. Axis values for this source, in design space coordinates. + + MutatorMath + varLib. + + .. deprecated:: 5.0 + Use the more explicit alias for this property :attr:`designLocation`. + """ + return self.designLocation + + @location.setter + def location(self, location: Optional[SimpleLocationDict]): + self.designLocation = location or {} + + def setFamilyName(self, familyName, languageCode="en"): + """Setter for :attr:`localisedFamilyName` + + .. versionadded:: 5.0 + """ + self.localisedFamilyName[languageCode] = tostr(familyName) + + def getFamilyName(self, languageCode="en"): + """Getter for :attr:`localisedFamilyName` + + .. versionadded:: 5.0 + """ + return self.localisedFamilyName.get(languageCode) + + def getFullDesignLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: + """Get the complete design location of this source, from its + :attr:`designLocation` and the document's axis defaults. + + .. versionadded:: 5.0 + """ + result: SimpleLocationDict = {} + for axis in doc.axes: + if axis.name in self.designLocation: + result[axis.name] = self.designLocation[axis.name] + else: + result[axis.name] = axis.map_forward(axis.default) + return result + + +class RuleDescriptor(SimpleDescriptor): + """Represents the rule descriptor element: a set of glyph substitutions to + trigger conditionally in some parts of the designspace. + + .. code:: python + + r1 = RuleDescriptor() + r1.name = "unique.rule.name" + r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)]) + r1.conditionSets.append([dict(...), dict(...)]) + r1.subs.append(("a", "a.alt")) + + .. code:: xml + + + + + + + + + + + + + + """ + + _attrs = ["name", "conditionSets", "subs"] # what do we need here + + def __init__(self, *, name=None, conditionSets=None, subs=None): + self.name = name + """string. Unique name for this rule. Can be used to reference this rule data.""" + # list of lists of dict(name='aaaa', minimum=0, maximum=1000) + self.conditionSets = conditionSets or [] + """a list of conditionsets. + + - Each conditionset is a list of conditions. 
+ - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys. + """ + # list of substitutions stored as tuples of glyphnames ("a", "a.alt") + self.subs = subs or [] + """list of substitutions. + + - Each substitution is stored as a tuple of glyphnames, e.g. ("a", "a.alt"). + - Note: By default, rules are applied first, before other text + shaping/OpenType layout, as they are part of the + `Required Variation Alternates OpenType feature `_. + See :ref:`rules-element` § Attributes. + """ + + +def evaluateRule(rule, location): + """Return True if any of the rule's conditionsets matches the given location.""" + return any(evaluateConditions(c, location) for c in rule.conditionSets) + + +def evaluateConditions(conditions, location): + """Return True if all the conditions match the given location. + + - If a condition has no minimum, check for < maximum. + - If a condition has no maximum, check for > minimum. + """ + for cd in conditions: + value = location[cd["name"]] + if cd.get("minimum") is None: + if value > cd["maximum"]: + return False + elif cd.get("maximum") is None: + if cd["minimum"] > value: + return False + elif not cd["minimum"] <= value <= cd["maximum"]: + return False + return True + + +def processRules(rules, location, glyphNames): + """Apply these rules at this location to these glyphnames. + + Return a new list of glyphNames with substitutions applied. + + - rule order matters + """ + newNames = [] + for rule in rules: + if evaluateRule(rule, location): + for name in glyphNames: + swap = False + for a, b in rule.subs: + if name == a: + swap = True + break + if swap: + newNames.append(b) + else: + newNames.append(name) + glyphNames = newNames + newNames = [] + return glyphNames + + +AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]] +SimpleLocationDict = Dict[str, float] + + +class AxisMappingDescriptor(SimpleDescriptor): + """Represents the axis mapping element: mapping an input location + to an output location in the designspace. + + .. code:: python + + m1 = AxisMappingDescriptor() + m1.inputLocation = {"weight": 900, "width": 150} + m1.outputLocation = {"weight": 870} + + .. code:: xml + + <mappings> + <mapping> + <input> + <dimension name="weight" xvalue="900"/> + <dimension name="width" xvalue="150"/> + </input> + <output> + <dimension name="weight" xvalue="870"/> + </output> + </mapping> + </mappings> + + """ + + _attrs = ["inputLocation", "outputLocation"] + + def __init__( + self, + *, + inputLocation=None, + outputLocation=None, + description=None, + groupDescription=None, + ): + self.inputLocation: SimpleLocationDict = inputLocation or {} + """dict. Axis values for the input of the mapping, in design space coordinates. + + varLib. + + .. versionadded:: 5.1 + """ + self.outputLocation: SimpleLocationDict = outputLocation or {} + """dict. Axis values for the output of the mapping, in design space coordinates. + + varLib. + + .. versionadded:: 5.1 + """ + self.description = description + """string. A description of the mapping. + + varLib. + + .. versionadded:: 5.2 + """ + self.groupDescription = groupDescription + """string. A description of the group of mappings. + + varLib. + + .. versionadded:: 5.2 + """ + + +class InstanceDescriptor(SimpleDescriptor): + """Simple container for data related to the instance + + + .. 
code:: python + + i2 = InstanceDescriptor() + i2.path = instancePath2 + i2.familyName = "InstanceFamilyName" + i2.styleName = "InstanceStyleName" + i2.name = "instance.ufo2" + # anisotropic location + i2.designLocation = dict(weight=500, width=(400,300)) + i2.postScriptFontName = "InstancePostscriptName" + i2.styleMapFamilyName = "InstanceStyleMapFamilyName" + i2.styleMapStyleName = "InstanceStyleMapStyleName" + i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever' + doc.addInstance(i2) + """ + + flavor = "instance" + _defaultLanguageCode = "en" + _attrs = [ + "filename", + "path", + "name", + "locationLabel", + "designLocation", + "userLocation", + "familyName", + "styleName", + "postScriptFontName", + "styleMapFamilyName", + "styleMapStyleName", + "localisedFamilyName", + "localisedStyleName", + "localisedStyleMapFamilyName", + "localisedStyleMapStyleName", + "glyphs", + "kerning", + "info", + "lib", + ] + + filename = posixpath_property("_filename") + path = posixpath_property("_path") + + def __init__( + self, + *, + filename=None, + path=None, + font=None, + name=None, + location=None, + locationLabel=None, + designLocation=None, + userLocation=None, + familyName=None, + styleName=None, + postScriptFontName=None, + styleMapFamilyName=None, + styleMapStyleName=None, + localisedFamilyName=None, + localisedStyleName=None, + localisedStyleMapFamilyName=None, + localisedStyleMapStyleName=None, + glyphs=None, + kerning=True, + info=True, + lib=None, + ): + self.filename = filename + """string. Relative path to the instance file, **as it is + in the document**. The file may or may not exist. + + MutatorMath + VarLib. + """ + self.path = path + """string. Absolute path to the instance file, calculated from + the document path and the string in the filename attr. The file may + or may not exist. + + MutatorMath. + """ + self.font = font + """Same as :attr:`SourceDescriptor.font` + + .. seealso:: :attr:`SourceDescriptor.font` + """ + self.name = name + """string. Unique identifier name of the instance, used to + identify it if it needs to be referenced from elsewhere in the + document. + """ + self.locationLabel = locationLabel + """Name of a :class:`LocationLabelDescriptor`. If + provided, the instance should have the same location as the + LocationLabel. + + .. seealso:: + :meth:`getFullDesignLocation` + :meth:`getFullUserLocation` + + .. versionadded:: 5.0 + """ + self.designLocation: AnisotropicLocationDict = ( + designLocation if designLocation is not None else (location or {}) + ) + """dict. Axis values for this instance, in design space coordinates. + + MutatorMath + varLib. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullDesignLocation` + :meth:`getFullUserLocation` + + .. versionadded:: 5.0 + """ + self.userLocation: SimpleLocationDict = userLocation or {} + """dict. Axis values for this instance, in user space coordinates. + + MutatorMath + varLib. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullDesignLocation` + :meth:`getFullUserLocation` + + .. versionadded:: 5.0 + """ + self.familyName = familyName + """string. Family name of this instance. + + MutatorMath + varLib. + """ + self.styleName = styleName + """string. Style name of this instance. + + MutatorMath + varLib. + """ + self.postScriptFontName = postScriptFontName + """string. Postscript fontname for this instance. + + MutatorMath + varLib. + """ + self.styleMapFamilyName = styleMapFamilyName + """string. StyleMap familyname for this instance. 
+ + MutatorMath + varLib. + """ + self.styleMapStyleName = styleMapStyleName + """string. StyleMap stylename for this instance. + + MutatorMath + varLib. + """ + self.localisedFamilyName = localisedFamilyName or {} + """dict. A dictionary of localised family name + strings, keyed by language code. + """ + self.localisedStyleName = localisedStyleName or {} + """dict. A dictionary of localised stylename + strings, keyed by language code. + """ + self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} + """A dictionary of localised style map + familyname strings, keyed by language code. + """ + self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} + """A dictionary of localised style map + stylename strings, keyed by language code. + """ + self.glyphs = glyphs or {} + """dict for special master definitions for glyphs. If glyphs + need special masters (to record the results of executed rules for + example). + + MutatorMath. + + .. deprecated:: 5.0 + Use rules or sparse sources instead. + """ + self.kerning = kerning + """bool. Indicates if this instance needs its kerning + calculated. + + MutatorMath. + + .. deprecated:: 5.0 + """ + self.info = info + """bool. Indicates if this instance needs the interpolating + font.info calculated. + + .. deprecated:: 5.0 + """ + + self.lib = lib or {} + """Custom data associated with this instance.""" + + @property + def location(self): + """dict. Axis values for this instance. + + MutatorMath + varLib. + + .. deprecated:: 5.0 + Use the more explicit alias for this property :attr:`designLocation`. + """ + return self.designLocation + + @location.setter + def location(self, location: Optional[AnisotropicLocationDict]): + self.designLocation = location or {} + + def setStyleName(self, styleName, languageCode="en"): + """These methods give easier access to the localised names.""" + self.localisedStyleName[languageCode] = tostr(styleName) + + def getStyleName(self, languageCode="en"): + return self.localisedStyleName.get(languageCode) + + def setFamilyName(self, familyName, languageCode="en"): + self.localisedFamilyName[languageCode] = tostr(familyName) + + def getFamilyName(self, languageCode="en"): + return self.localisedFamilyName.get(languageCode) + + def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"): + self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) + + def getStyleMapStyleName(self, languageCode="en"): + return self.localisedStyleMapStyleName.get(languageCode) + + def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"): + self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) + + def getStyleMapFamilyName(self, languageCode="en"): + return self.localisedStyleMapFamilyName.get(languageCode) + + def clearLocation(self, axisName: Optional[str] = None): + """Clear all location-related fields. Ensures that + :attr:`designLocation` and :attr:`userLocation` are dictionaries + (possibly empty if clearing everything). + + In order to update the location of this instance wholesale, a user + should first clear all the fields, then change the field(s) for which + they have data. + + .. code:: python + + instance.clearLocation() + instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} + instance.userLocation = {'Opsz': 16} + + In order to update a single axis location, the user should only clear + that axis, then edit the values: + + .. 
code:: python + + instance.clearLocation('Weight') + instance.designLocation['Weight'] = (34, 36.5) + + Args: + axisName: if provided, only clear the location for that axis. + + .. versionadded:: 5.0 + """ + self.locationLabel = None + if axisName is None: + self.designLocation = {} + self.userLocation = {} + else: + if self.designLocation is None: + self.designLocation = {} + if axisName in self.designLocation: + del self.designLocation[axisName] + if self.userLocation is None: + self.userLocation = {} + if axisName in self.userLocation: + del self.userLocation[axisName] + + def getLocationLabelDescriptor( + self, doc: "DesignSpaceDocument" + ) -> Optional[LocationLabelDescriptor]: + """Get the :class:`LocationLabelDescriptor` instance that matches + this instances's :attr:`locationLabel`. + + Raises if the named label can't be found. + + .. versionadded:: 5.0 + """ + if self.locationLabel is None: + return None + label = doc.getLocationLabel(self.locationLabel) + if label is None: + raise DesignSpaceDocumentError( + "InstanceDescriptor.getLocationLabelDescriptor(): " + f"unknown location label `{self.locationLabel}` in instance `{self.name}`." + ) + return label + + def getFullDesignLocation( + self, doc: "DesignSpaceDocument" + ) -> AnisotropicLocationDict: + """Get the complete design location of this instance, by combining data + from the various location fields, default axis values and mappings, and + top-level location labels. + + The source of truth for this instance's location is determined for each + axis independently by taking the first not-None field in this list: + + - ``locationLabel``: the location along this axis is the same as the + matching STAT format 4 label. No anisotropy. + - ``designLocation[axisName]``: the explicit design location along this + axis, possibly anisotropic. + - ``userLocation[axisName]``: the explicit user location along this + axis. No anisotropy. + - ``axis.default``: default axis value. No anisotropy. + + .. versionadded:: 5.0 + """ + label = self.getLocationLabelDescriptor(doc) + if label is not None: + return doc.map_forward(label.userLocation) # type: ignore + result: AnisotropicLocationDict = {} + for axis in doc.axes: + if axis.name in self.designLocation: + result[axis.name] = self.designLocation[axis.name] + elif axis.name in self.userLocation: + result[axis.name] = axis.map_forward(self.userLocation[axis.name]) + else: + result[axis.name] = axis.map_forward(axis.default) + return result + + def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: + """Get the complete user location for this instance. + + .. seealso:: :meth:`getFullDesignLocation` + + .. versionadded:: 5.0 + """ + return doc.map_backward(self.getFullDesignLocation(doc)) + + +def tagForAxisName(name): + # try to find or make a tag name for this axis name + names = { + "weight": ("wght", dict(en="Weight")), + "width": ("wdth", dict(en="Width")), + "optical": ("opsz", dict(en="Optical Size")), + "slant": ("slnt", dict(en="Slant")), + "italic": ("ital", dict(en="Italic")), + } + if name.lower() in names: + return names[name.lower()] + if len(name) < 4: + tag = name + "*" * (4 - len(name)) + else: + tag = name[:4] + return tag, dict(en=name) + + +class AbstractAxisDescriptor(SimpleDescriptor): + flavor = "axis" + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + # opentype tag for this axis + self.tag = tag + """string. Four letter tag for this axis. 
Some might be + registered at the `OpenType + specification `__. + Privately-defined axis tags must begin with an uppercase letter and + use only uppercase letters or digits. + """ + # name of the axis used in locations + self.name = name + """string. Name of the axis as it is used in the location dicts. + + MutatorMath + varLib. + """ + # names for UI purposes, if this is not a standard axis, + self.labelNames = labelNames or {} + """dict. When defining a non-registered axis, it will be + necessary to define user-facing readable names for the axis. Keyed by + xml:lang code. Values are required to be ``unicode`` strings, even if + they only contain ASCII characters. + """ + self.hidden = hidden + """bool. Whether this axis should be hidden in user interfaces. + """ + self.map = map or [] + """list of input / output values that can describe a warp of user space + to design space coordinates. If no map values are present, it is assumed + user space is the same as design space, as in [(minimum, minimum), + (maximum, maximum)]. + + varLib. + """ + self.axisOrdering = axisOrdering + """STAT table field ``axisOrdering``. + + See: `OTSpec STAT Axis Record `_ + + .. versionadded:: 5.0 + """ + self.axisLabels: List[AxisLabelDescriptor] = axisLabels or [] + """STAT table entries for Axis Value Tables format 1, 2, 3. + + See: `OTSpec STAT Axis Value Tables `_ + + .. versionadded:: 5.0 + """ + + +class AxisDescriptor(AbstractAxisDescriptor): + """Simple container for the axis data. + + Add more localisations? + + .. code:: python + + a1 = AxisDescriptor() + a1.minimum = 1 + a1.maximum = 1000 + a1.default = 400 + a1.name = "weight" + a1.tag = "wght" + a1.labelNames['fa-IR'] = "قطر" + a1.labelNames['en'] = "Wéíght" + a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] + a1.axisOrdering = 1 + a1.axisLabels = [ + AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) + ] + doc.addAxis(a1) + """ + + _attrs = [ + "tag", + "name", + "maximum", + "minimum", + "default", + "map", + "axisOrdering", + "axisLabels", + ] + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + minimum=None, + default=None, + maximum=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + super().__init__( + tag=tag, + name=name, + labelNames=labelNames, + hidden=hidden, + map=map, + axisOrdering=axisOrdering, + axisLabels=axisLabels, + ) + self.minimum = minimum + """number. The minimum value for this axis in user space. + + MutatorMath + varLib. + """ + self.maximum = maximum + """number. The maximum value for this axis in user space. + + MutatorMath + varLib. + """ + self.default = default + """number. The default value for this axis, i.e. when a new location is + created, this is the value this axis will get in user space. + + MutatorMath + varLib. 
+ """ + + def serialize(self): + # output to a dict, used in testing + return dict( + tag=self.tag, + name=self.name, + labelNames=self.labelNames, + maximum=self.maximum, + minimum=self.minimum, + default=self.default, + hidden=self.hidden, + map=self.map, + axisOrdering=self.axisOrdering, + axisLabels=self.axisLabels, + ) + + def map_forward(self, v): + """Maps value from axis mapping's input (user) to output (design).""" + from fontTools.varLib.models import piecewiseLinearMap + + if not self.map: + return v + return piecewiseLinearMap(v, {k: v for k, v in self.map}) + + def map_backward(self, v): + """Maps value from axis mapping's output (design) to input (user).""" + from fontTools.varLib.models import piecewiseLinearMap + + if isinstance(v, tuple): + v = v[0] + if not self.map: + return v + return piecewiseLinearMap(v, {v: k for k, v in self.map}) + + +class DiscreteAxisDescriptor(AbstractAxisDescriptor): + """Container for discrete axis data. + + Use this for axes that do not interpolate. The main difference from a + continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, + while a discrete axis has a list of ``values``. + + Example: an Italic axis with 2 stops, Roman and Italic, that are not + compatible. The axis still allows to bind together the full font family, + which is useful for the STAT table, however it can't become a variation + axis in a VF. + + .. code:: python + + a2 = DiscreteAxisDescriptor() + a2.values = [0, 1] + a2.default = 0 + a2.name = "Italic" + a2.tag = "ITAL" + a2.labelNames['fr'] = "Italique" + a2.map = [(0, 0), (1, -11)] + a2.axisOrdering = 2 + a2.axisLabels = [ + AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) + ] + doc.addAxis(a2) + + .. versionadded:: 5.0 + """ + + flavor = "axis" + _attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels") + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + values=None, + default=None, + hidden=False, + map=None, + axisOrdering=None, + axisLabels=None, + ): + super().__init__( + tag=tag, + name=name, + labelNames=labelNames, + hidden=hidden, + map=map, + axisOrdering=axisOrdering, + axisLabels=axisLabels, + ) + self.default: float = default + """The default value for this axis, i.e. when a new location is + created, this is the value this axis will get in user space. + + However, this default value is less important than in continuous axes: + + - it doesn't define the "neutral" version of outlines from which + deltas would apply, as this axis does not interpolate. + - it doesn't provide the reference glyph set for the designspace, as + fonts at each value can have different glyph sets. + """ + self.values: List[float] = values or [] + """List of possible values for this axis. Contrary to continuous axes, + only the values in this list can be taken by the axis, nothing in-between. + """ + + def map_forward(self, value): + """Maps value from axis mapping's input to output. + + Returns value unchanged if no mapping entry is found. + + Note: for discrete axes, each value must have its mapping entry, if + you intend that value to be mapped. + """ + return next((v for k, v in self.map if k == value), value) + + def map_backward(self, value): + """Maps value from axis mapping's output to input. + + Returns value unchanged if no mapping entry is found. + + Note: for discrete axes, each value must have its mapping entry, if + you intend that value to be mapped. 
+ """ + if isinstance(value, tuple): + value = value[0] + return next((k for k, v in self.map if v == value), value) + + +class AxisLabelDescriptor(SimpleDescriptor): + """Container for axis label data. + + Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). + All values are user values. + See: `OTSpec STAT Axis value table, format 1, 2, 3 `_ + + The STAT format of the Axis value depends on which field are filled-in, + see :meth:`getFormat` + + .. versionadded:: 5.0 + """ + + flavor = "label" + _attrs = ( + "userMinimum", + "userValue", + "userMaximum", + "name", + "elidable", + "olderSibling", + "linkedUserValue", + "labelNames", + ) + + def __init__( + self, + *, + name, + userValue, + userMinimum=None, + userMaximum=None, + elidable=False, + olderSibling=False, + linkedUserValue=None, + labelNames=None, + ): + self.userMinimum: Optional[float] = userMinimum + """STAT field ``rangeMinValue`` (format 2).""" + self.userValue: float = userValue + """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2).""" + self.userMaximum: Optional[float] = userMaximum + """STAT field ``rangeMaxValue`` (format 2).""" + self.name: str = name + """Label for this axis location, STAT field ``valueNameID``.""" + self.elidable: bool = elidable + """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. + + See: `OTSpec STAT Flags `_ + """ + self.olderSibling: bool = olderSibling + """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. + + See: `OTSpec STAT Flags `_ + """ + self.linkedUserValue: Optional[float] = linkedUserValue + """STAT field ``linkedValue`` (format 3).""" + self.labelNames: MutableMapping[str, str] = labelNames or {} + """User-facing translations of this location's label. Keyed by + ``xml:lang`` code. + """ + + def getFormat(self) -> int: + """Determine which format of STAT Axis value to use to encode this label. + + =========== ========= =========== =========== =============== + STAT Format userValue userMinimum userMaximum linkedUserValue + =========== ========= =========== =========== =============== + 1 ✅ ❌ ❌ ❌ + 2 ✅ ✅ ✅ ❌ + 3 ✅ ❌ ❌ ✅ + =========== ========= =========== =========== =============== + """ + if self.linkedUserValue is not None: + return 3 + if self.userMinimum is not None or self.userMaximum is not None: + return 2 + return 1 + + @property + def defaultName(self) -> str: + """Return the English name from :attr:`labelNames` or the :attr:`name`.""" + return self.labelNames.get("en") or self.name + + +class LocationLabelDescriptor(SimpleDescriptor): + """Container for location label data. + + Analogue of OpenType's STAT data for a free-floating location (format 4). + All values are user values. + + See: `OTSpec STAT Axis value table, format 4 `_ + + .. versionadded:: 5.0 + """ + + flavor = "label" + _attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames") + + def __init__( + self, + *, + name, + userLocation, + elidable=False, + olderSibling=False, + labelNames=None, + ): + self.name: str = name + """Label for this named location, STAT field ``valueNameID``.""" + self.userLocation: SimpleLocationDict = userLocation or {} + """Location in user coordinates along each axis. + + If an axis is not mentioned, it is assumed to be at its default location. + + .. seealso:: This may be only part of the full location. See: + :meth:`getFullUserLocation` + """ + self.elidable: bool = elidable + """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. + + See: `OTSpec STAT Flags `_ + """ + self.olderSibling: bool = olderSibling + """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. 
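+
+        If set, the axis value information also applies to sibling fonts in
+        the same family that were released earlier without it (per the
+        OpenType flag description linked below).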
+ + See: `OTSpec STAT Flags `_ + """ + self.labelNames: Dict[str, str] = labelNames or {} + """User-facing translations of this location's label. Keyed by + xml:lang code. + """ + + @property + def defaultName(self) -> str: + """Return the English name from :attr:`labelNames` or the :attr:`name`.""" + return self.labelNames.get("en") or self.name + + def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: + """Get the complete user location of this label, by combining data + from the explicit user location and default axis values. + + .. versionadded:: 5.0 + """ + return { + axis.name: self.userLocation.get(axis.name, axis.default) + for axis in doc.axes + } + + +class VariableFontDescriptor(SimpleDescriptor): + """Container for variable fonts, sub-spaces of the Designspace. + + Use-cases: + + - From a single DesignSpace with discrete axes, define 1 variable font + per value on the discrete axes. Before version 5, you would have needed + 1 DesignSpace per such variable font, and a lot of data duplication. + - From a big variable font with many axes, define subsets of that variable + font that only include some axes and freeze other axes at a given location. + + .. versionadded:: 5.0 + """ + + flavor = "variable-font" + _attrs = ("filename", "axisSubsets", "lib") + + filename = posixpath_property("_filename") + + def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): + self.name: str = name + """string, required. Name of this variable to identify it during the + build process and from other parts of the document, and also as a + filename in case the filename property is empty. + + VarLib. + """ + self.filename: str = filename + """string, optional. Relative path to the variable font file, **as it is + in the document**. The file may or may not exist. + + If not specified, the :attr:`name` will be used as a basename for the file. + + .. note:: + This is intended to be a simple filename (basename or stem) only. + Build tools will only use the basename component and ignore any + directory separators for security reasons. + """ + self.axisSubsets: List[ + Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor] + ] = (axisSubsets or []) + """Axis subsets to include in this variable font. + + If an axis is not mentioned, assume that we only want the default + location of that axis (same as a :class:`ValueAxisSubsetDescriptor`). + """ + self.lib: MutableMapping[str, Any] = lib or {} + """Custom data associated with this variable font.""" + + +class RangeAxisSubsetDescriptor(SimpleDescriptor): + """Subset of a continuous axis to include in a variable font. + + .. versionadded:: 5.0 + """ + + flavor = "axis-subset" + _attrs = ("name", "userMinimum", "userDefault", "userMaximum") + + def __init__( + self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf + ): + self.name: str = name + """Name of the :class:`AxisDescriptor` to subset.""" + self.userMinimum: float = userMinimum + """New minimum value of the axis in the target variable font. + If not specified, assume the same minimum value as the full axis. + (default = ``-math.inf``) + """ + self.userDefault: Optional[float] = userDefault + """New default value of the axis in the target variable font. + If not specified, assume the same default value as the full axis. + (default = ``None``) + """ + self.userMaximum: float = userMaximum + """New maximum value of the axis in the target variable font. + If not specified, assume the same maximum value as the full axis. 
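+        For example (an illustrative sketch), subsetting a 100-900 weight
+        axis with ``userMaximum=700`` caps the resulting variable font at
+        the Bold stop.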
+ (default = ``math.inf``) + """ + + +class ValueAxisSubsetDescriptor(SimpleDescriptor): + """Single value of a discrete or continuous axis to use in a variable font. + + .. versionadded:: 5.0 + """ + + flavor = "axis-subset" + _attrs = ("name", "userValue") + + def __init__(self, *, name, userValue): + self.name: str = name + """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` + to "snapshot" or "freeze". + """ + self.userValue: float = userValue + """Value in user coordinates at which to freeze the given axis.""" + + +class BaseDocWriter(object): + _whiteSpace = " " + axisDescriptorClass = AxisDescriptor + discreteAxisDescriptorClass = DiscreteAxisDescriptor + axisLabelDescriptorClass = AxisLabelDescriptor + axisMappingDescriptorClass = AxisMappingDescriptor + locationLabelDescriptorClass = LocationLabelDescriptor + ruleDescriptorClass = RuleDescriptor + sourceDescriptorClass = SourceDescriptor + variableFontDescriptorClass = VariableFontDescriptor + valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor + rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor + instanceDescriptorClass = InstanceDescriptor + + @classmethod + def getAxisDecriptor(cls): + return cls.axisDescriptorClass() + + @classmethod + def getAxisMappingDescriptor(cls): + return cls.axisMappingDescriptorClass() + + @classmethod + def getSourceDescriptor(cls): + return cls.sourceDescriptorClass() + + @classmethod + def getInstanceDescriptor(cls): + return cls.instanceDescriptorClass() + + @classmethod + def getRuleDescriptor(cls): + return cls.ruleDescriptorClass() + + def __init__(self, documentPath, documentObject: DesignSpaceDocument): + self.path = documentPath + self.documentObject = documentObject + self.effectiveFormatTuple = self._getEffectiveFormatTuple() + self.root = ET.Element("designspace") + + def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): + self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple) + + if ( + self.documentObject.axes + or self.documentObject.axisMappings + or self.documentObject.elidedFallbackName is not None + ): + axesElement = ET.Element("axes") + if self.documentObject.elidedFallbackName is not None: + axesElement.attrib["elidedfallbackname"] = ( + self.documentObject.elidedFallbackName + ) + self.root.append(axesElement) + for axisObject in self.documentObject.axes: + self._addAxis(axisObject) + + if self.documentObject.axisMappings: + mappingsElement = None + lastGroup = object() + for mappingObject in self.documentObject.axisMappings: + if getattr(mappingObject, "groupDescription", None) != lastGroup: + if mappingsElement is not None: + self.root.findall(".axes")[0].append(mappingsElement) + lastGroup = getattr(mappingObject, "groupDescription", None) + mappingsElement = ET.Element("mappings") + if lastGroup is not None: + mappingsElement.attrib["description"] = lastGroup + self._addAxisMapping(mappingsElement, mappingObject) + if mappingsElement is not None: + self.root.findall(".axes")[0].append(mappingsElement) + + if self.documentObject.locationLabels: + labelsElement = ET.Element("labels") + for labelObject in self.documentObject.locationLabels: + self._addLocationLabel(labelsElement, labelObject) + self.root.append(labelsElement) + + if self.documentObject.rules: + if getattr(self.documentObject, "rulesProcessingLast", False): + attributes = {"processing": "last"} + else: + attributes = {} + self.root.append(ET.Element("rules", attributes)) + for ruleObject in self.documentObject.rules: + 
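# serialize each rule into the <rules> element created above
+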
self._addRule(ruleObject) + + if self.documentObject.sources: + self.root.append(ET.Element("sources")) + for sourceObject in self.documentObject.sources: + self._addSource(sourceObject) + + if self.documentObject.variableFonts: + variableFontsElement = ET.Element("variable-fonts") + for variableFont in self.documentObject.variableFonts: + self._addVariableFont(variableFontsElement, variableFont) + self.root.append(variableFontsElement) + + if self.documentObject.instances: + self.root.append(ET.Element("instances")) + for instanceObject in self.documentObject.instances: + self._addInstance(instanceObject) + + if self.documentObject.lib: + self._addLib(self.root, self.documentObject.lib, 2) + + tree = ET.ElementTree(self.root) + tree.write( + self.path, + encoding=encoding, + method="xml", + xml_declaration=xml_declaration, + pretty_print=pretty, + ) + + def _getEffectiveFormatTuple(self): + """Try to use the version specified in the document, or a sufficiently + recent version to be able to encode what the document contains. + """ + minVersion = self.documentObject.formatTuple + if ( + any( + hasattr(axis, "values") + or axis.axisOrdering is not None + or axis.axisLabels + for axis in self.documentObject.axes + ) + or self.documentObject.locationLabels + or any(source.localisedFamilyName for source in self.documentObject.sources) + or self.documentObject.variableFonts + or any( + instance.locationLabel or instance.userLocation + for instance in self.documentObject.instances + ) + ): + if minVersion < (5, 0): + minVersion = (5, 0) + if self.documentObject.axisMappings: + if minVersion < (5, 1): + minVersion = (5, 1) + return minVersion + + def _makeLocationElement(self, locationObject, name=None): + """Convert Location dict to a locationElement.""" + locElement = ET.Element("location") + if name is not None: + locElement.attrib["name"] = name + validatedLocation = self.documentObject.newDefaultLocation() + for axisName, axisValue in locationObject.items(): + if axisName in validatedLocation: + # only accept values we know + validatedLocation[axisName] = axisValue + for dimensionName, dimensionValue in validatedLocation.items(): + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = dimensionName + if type(dimensionValue) == tuple: + dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0]) + dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1]) + else: + dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue) + locElement.append(dimElement) + return locElement, validatedLocation + + def intOrFloat(self, num): + if int(num) == num: + return "%d" % num + return ("%f" % num).rstrip("0").rstrip(".") + + def _addRule(self, ruleObject): + ruleElement = ET.Element("rule") + if ruleObject.name is not None: + ruleElement.attrib["name"] = ruleObject.name + for conditions in ruleObject.conditionSets: + conditionsetElement = ET.Element("conditionset") + for cond in conditions: + if cond.get("minimum") is None and cond.get("maximum") is None: + # neither is defined, don't add this condition + continue + conditionElement = ET.Element("condition") + conditionElement.attrib["name"] = cond.get("name") + if cond.get("minimum") is not None: + conditionElement.attrib["minimum"] = self.intOrFloat( + cond.get("minimum") + ) + if cond.get("maximum") is not None: + conditionElement.attrib["maximum"] = self.intOrFloat( + cond.get("maximum") + ) + conditionsetElement.append(conditionElement) + # Serialize the conditionset even if it is empty, as this is the + # canonical way of 
defining a rule that is always true. + ruleElement.append(conditionsetElement) + for sub in ruleObject.subs: + subElement = ET.Element("sub") + subElement.attrib["name"] = sub[0] + subElement.attrib["with"] = sub[1] + ruleElement.append(subElement) + if len(ruleElement): + self.root.findall(".rules")[0].append(ruleElement) + + def _addAxis(self, axisObject): + axisElement = ET.Element("axis") + axisElement.attrib["tag"] = axisObject.tag + axisElement.attrib["name"] = axisObject.name + self._addLabelNames(axisElement, axisObject.labelNames) + if axisObject.map: + for inputValue, outputValue in axisObject.map: + mapElement = ET.Element("map") + mapElement.attrib["input"] = self.intOrFloat(inputValue) + mapElement.attrib["output"] = self.intOrFloat(outputValue) + axisElement.append(mapElement) + if axisObject.axisOrdering is not None or axisObject.axisLabels: + labelsElement = ET.Element("labels") + if axisObject.axisOrdering is not None: + labelsElement.attrib["ordering"] = str(axisObject.axisOrdering) + for label in axisObject.axisLabels: + self._addAxisLabel(labelsElement, label) + axisElement.append(labelsElement) + if hasattr(axisObject, "minimum"): + axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum) + axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum) + elif hasattr(axisObject, "values"): + axisElement.attrib["values"] = " ".join( + self.intOrFloat(v) for v in axisObject.values + ) + axisElement.attrib["default"] = self.intOrFloat(axisObject.default) + if axisObject.hidden: + axisElement.attrib["hidden"] = "1" + self.root.findall(".axes")[0].append(axisElement) + + def _addAxisMapping(self, mappingsElement, mappingObject): + mappingElement = ET.Element("mapping") + if getattr(mappingObject, "description", None) is not None: + mappingElement.attrib["description"] = mappingObject.description + for what in ("inputLocation", "outputLocation"): + whatObject = getattr(mappingObject, what, None) + if whatObject is None: + continue + whatElement = ET.Element(what[:-8]) + mappingElement.append(whatElement) + + for name, value in whatObject.items(): + dimensionElement = ET.Element("dimension") + dimensionElement.attrib["name"] = name + dimensionElement.attrib["xvalue"] = self.intOrFloat(value) + whatElement.append(dimensionElement) + + mappingsElement.append(mappingElement) + + def _addAxisLabel( + self, axisElement: ET.Element, label: AxisLabelDescriptor + ) -> None: + labelElement = ET.Element("label") + labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue) + if label.userMinimum is not None: + labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum) + if label.userMaximum is not None: + labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum) + labelElement.attrib["name"] = label.name + if label.elidable: + labelElement.attrib["elidable"] = "true" + if label.olderSibling: + labelElement.attrib["oldersibling"] = "true" + if label.linkedUserValue is not None: + labelElement.attrib["linkeduservalue"] = self.intOrFloat( + label.linkedUserValue + ) + self._addLabelNames(labelElement, label.labelNames) + axisElement.append(labelElement) + + def _addLabelNames(self, parentElement, labelNames): + for languageCode, labelName in sorted(labelNames.items()): + languageElement = ET.Element("labelname") + languageElement.attrib[XML_LANG] = languageCode + languageElement.text = labelName + parentElement.append(languageElement) + + def _addLocationLabel( + self, parentElement: ET.Element, label: LocationLabelDescriptor + ) -> None: 
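+        # Serialize a STAT format-4 style <label>: its name, flags,
+        # localised label names, and its user-space <location>.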
+ labelElement = ET.Element("label") + labelElement.attrib["name"] = label.name + if label.elidable: + labelElement.attrib["elidable"] = "true" + if label.olderSibling: + labelElement.attrib["oldersibling"] = "true" + self._addLabelNames(labelElement, label.labelNames) + self._addLocationElement(labelElement, userLocation=label.userLocation) + parentElement.append(labelElement) + + def _addLocationElement( + self, + parentElement, + *, + designLocation: AnisotropicLocationDict = None, + userLocation: SimpleLocationDict = None, + ): + locElement = ET.Element("location") + for axis in self.documentObject.axes: + if designLocation is not None and axis.name in designLocation: + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = axis.name + value = designLocation[axis.name] + if isinstance(value, tuple): + dimElement.attrib["xvalue"] = self.intOrFloat(value[0]) + dimElement.attrib["yvalue"] = self.intOrFloat(value[1]) + else: + dimElement.attrib["xvalue"] = self.intOrFloat(value) + locElement.append(dimElement) + elif userLocation is not None and axis.name in userLocation: + dimElement = ET.Element("dimension") + dimElement.attrib["name"] = axis.name + value = userLocation[axis.name] + dimElement.attrib["uservalue"] = self.intOrFloat(value) + locElement.append(dimElement) + if len(locElement) > 0: + parentElement.append(locElement) + + def _addInstance(self, instanceObject): + instanceElement = ET.Element("instance") + if instanceObject.name is not None: + instanceElement.attrib["name"] = instanceObject.name + if instanceObject.locationLabel is not None: + instanceElement.attrib["location"] = instanceObject.locationLabel + if instanceObject.familyName is not None: + instanceElement.attrib["familyname"] = instanceObject.familyName + if instanceObject.styleName is not None: + instanceElement.attrib["stylename"] = instanceObject.styleName + # add localisations + if instanceObject.localisedStyleName: + languageCodes = list(instanceObject.localisedStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedStyleNameElement = ET.Element("stylename") + localisedStyleNameElement.attrib[XML_LANG] = code + localisedStyleNameElement.text = instanceObject.getStyleName(code) + instanceElement.append(localisedStyleNameElement) + if instanceObject.localisedFamilyName: + languageCodes = list(instanceObject.localisedFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedFamilyNameElement = ET.Element("familyname") + localisedFamilyNameElement.attrib[XML_LANG] = code + localisedFamilyNameElement.text = instanceObject.getFamilyName(code) + instanceElement.append(localisedFamilyNameElement) + if instanceObject.localisedStyleMapStyleName: + languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapStyleNameElement = ET.Element("stylemapstylename") + localisedStyleMapStyleNameElement.attrib[XML_LANG] = code + localisedStyleMapStyleNameElement.text = ( + instanceObject.getStyleMapStyleName(code) + ) + instanceElement.append(localisedStyleMapStyleNameElement) + if instanceObject.localisedStyleMapFamilyName: + languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapFamilyNameElement = 
ET.Element("stylemapfamilyname") + localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code + localisedStyleMapFamilyNameElement.text = ( + instanceObject.getStyleMapFamilyName(code) + ) + instanceElement.append(localisedStyleMapFamilyNameElement) + + if self.effectiveFormatTuple >= (5, 0): + if instanceObject.locationLabel is None: + self._addLocationElement( + instanceElement, + designLocation=instanceObject.designLocation, + userLocation=instanceObject.userLocation, + ) + else: + # Pre-version 5.0 code was validating and filling in the location + # dict while writing it out, as preserved below. + if instanceObject.location is not None: + locationElement, instanceObject.location = self._makeLocationElement( + instanceObject.location + ) + instanceElement.append(locationElement) + if instanceObject.filename is not None: + instanceElement.attrib["filename"] = instanceObject.filename + if instanceObject.postScriptFontName is not None: + instanceElement.attrib["postscriptfontname"] = ( + instanceObject.postScriptFontName + ) + if instanceObject.styleMapFamilyName is not None: + instanceElement.attrib["stylemapfamilyname"] = ( + instanceObject.styleMapFamilyName + ) + if instanceObject.styleMapStyleName is not None: + instanceElement.attrib["stylemapstylename"] = ( + instanceObject.styleMapStyleName + ) + if self.effectiveFormatTuple < (5, 0): + # Deprecated members as of version 5.0 + if instanceObject.glyphs: + if instanceElement.findall(".glyphs") == []: + glyphsElement = ET.Element("glyphs") + instanceElement.append(glyphsElement) + glyphsElement = instanceElement.findall(".glyphs")[0] + for glyphName, data in sorted(instanceObject.glyphs.items()): + glyphElement = self._writeGlyphElement( + instanceElement, instanceObject, glyphName, data + ) + glyphsElement.append(glyphElement) + if instanceObject.kerning: + kerningElement = ET.Element("kerning") + instanceElement.append(kerningElement) + if instanceObject.info: + infoElement = ET.Element("info") + instanceElement.append(infoElement) + self._addLib(instanceElement, instanceObject.lib, 4) + self.root.findall(".instances")[0].append(instanceElement) + + def _addSource(self, sourceObject): + sourceElement = ET.Element("source") + if sourceObject.filename is not None: + sourceElement.attrib["filename"] = sourceObject.filename + if sourceObject.name is not None: + if sourceObject.name.find("temp_master") != 0: + # do not save temporary source names + sourceElement.attrib["name"] = sourceObject.name + if sourceObject.familyName is not None: + sourceElement.attrib["familyname"] = sourceObject.familyName + if sourceObject.styleName is not None: + sourceElement.attrib["stylename"] = sourceObject.styleName + if sourceObject.layerName is not None: + sourceElement.attrib["layer"] = sourceObject.layerName + if sourceObject.localisedFamilyName: + languageCodes = list(sourceObject.localisedFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedFamilyNameElement = ET.Element("familyname") + localisedFamilyNameElement.attrib[XML_LANG] = code + localisedFamilyNameElement.text = sourceObject.getFamilyName(code) + sourceElement.append(localisedFamilyNameElement) + if sourceObject.copyLib: + libElement = ET.Element("lib") + libElement.attrib["copy"] = "1" + sourceElement.append(libElement) + if sourceObject.copyGroups: + groupsElement = ET.Element("groups") + groupsElement.attrib["copy"] = "1" + sourceElement.append(groupsElement) + if 
sourceObject.copyFeatures: + featuresElement = ET.Element("features") + featuresElement.attrib["copy"] = "1" + sourceElement.append(featuresElement) + if sourceObject.copyInfo or sourceObject.muteInfo: + infoElement = ET.Element("info") + if sourceObject.copyInfo: + infoElement.attrib["copy"] = "1" + if sourceObject.muteInfo: + infoElement.attrib["mute"] = "1" + sourceElement.append(infoElement) + if sourceObject.muteKerning: + kerningElement = ET.Element("kerning") + kerningElement.attrib["mute"] = "1" + sourceElement.append(kerningElement) + if sourceObject.mutedGlyphNames: + for name in sourceObject.mutedGlyphNames: + glyphElement = ET.Element("glyph") + glyphElement.attrib["name"] = name + glyphElement.attrib["mute"] = "1" + sourceElement.append(glyphElement) + if self.effectiveFormatTuple >= (5, 0): + self._addLocationElement( + sourceElement, designLocation=sourceObject.location + ) + else: + # Pre-version 5.0 code was validating and filling in the location + # dict while writing it out, as preserved below. + locationElement, sourceObject.location = self._makeLocationElement( + sourceObject.location + ) + sourceElement.append(locationElement) + self.root.findall(".sources")[0].append(sourceElement) + + def _addVariableFont( + self, parentElement: ET.Element, vf: VariableFontDescriptor + ) -> None: + vfElement = ET.Element("variable-font") + vfElement.attrib["name"] = vf.name + if vf.filename is not None: + vfElement.attrib["filename"] = vf.filename + if vf.axisSubsets: + subsetsElement = ET.Element("axis-subsets") + for subset in vf.axisSubsets: + subsetElement = ET.Element("axis-subset") + subsetElement.attrib["name"] = subset.name + # Mypy doesn't support narrowing union types via hasattr() + # https://mypy.readthedocs.io/en/stable/type_narrowing.html + # TODO(Python 3.10): use TypeGuard + if hasattr(subset, "userMinimum"): + subset = cast(RangeAxisSubsetDescriptor, subset) + if subset.userMinimum != -math.inf: + subsetElement.attrib["userminimum"] = self.intOrFloat( + subset.userMinimum + ) + if subset.userMaximum != math.inf: + subsetElement.attrib["usermaximum"] = self.intOrFloat( + subset.userMaximum + ) + if subset.userDefault is not None: + subsetElement.attrib["userdefault"] = self.intOrFloat( + subset.userDefault + ) + elif hasattr(subset, "userValue"): + subset = cast(ValueAxisSubsetDescriptor, subset) + subsetElement.attrib["uservalue"] = self.intOrFloat( + subset.userValue + ) + subsetsElement.append(subsetElement) + vfElement.append(subsetsElement) + self._addLib(vfElement, vf.lib, 4) + parentElement.append(vfElement) + + def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None: + if not data: + return + libElement = ET.Element("lib") + libElement.append(plistlib.totree(data, indent_level=indent_level)) + parentElement.append(libElement) + + def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data): + glyphElement = ET.Element("glyph") + if data.get("mute"): + glyphElement.attrib["mute"] = "1" + if data.get("unicodes") is not None: + glyphElement.attrib["unicode"] = " ".join( + [hex(u) for u in data.get("unicodes")] + ) + if data.get("instanceLocation") is not None: + locationElement, data["instanceLocation"] = self._makeLocationElement( + data.get("instanceLocation") + ) + glyphElement.append(locationElement) + if glyphName is not None: + glyphElement.attrib["name"] = glyphName + if data.get("note") is not None: + noteElement = ET.Element("note") + noteElement.text = data.get("note") + glyphElement.append(noteElement) + if 
data.get("masters") is not None: + mastersElement = ET.Element("masters") + for m in data.get("masters"): + masterElement = ET.Element("master") + if m.get("glyphName") is not None: + masterElement.attrib["glyphname"] = m.get("glyphName") + if m.get("font") is not None: + masterElement.attrib["source"] = m.get("font") + if m.get("location") is not None: + locationElement, m["location"] = self._makeLocationElement( + m.get("location") + ) + masterElement.append(locationElement) + mastersElement.append(masterElement) + glyphElement.append(mastersElement) + return glyphElement + + +class BaseDocReader(LogMixin): + axisDescriptorClass = AxisDescriptor + discreteAxisDescriptorClass = DiscreteAxisDescriptor + axisLabelDescriptorClass = AxisLabelDescriptor + axisMappingDescriptorClass = AxisMappingDescriptor + locationLabelDescriptorClass = LocationLabelDescriptor + ruleDescriptorClass = RuleDescriptor + sourceDescriptorClass = SourceDescriptor + variableFontsDescriptorClass = VariableFontDescriptor + valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor + rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor + instanceDescriptorClass = InstanceDescriptor + + def __init__(self, documentPath, documentObject): + self.path = documentPath + self.documentObject = documentObject + tree = ET.parse(self.path) + self.root = tree.getroot() + self.documentObject.formatVersion = self.root.attrib.get("format", "3.0") + self._axes = [] + self.rules = [] + self.sources = [] + self.instances = [] + self.axisDefaults = {} + self._strictAxisNames = True + + @classmethod + def fromstring(cls, string, documentObject): + f = BytesIO(tobytes(string, encoding="utf-8")) + self = cls(f, documentObject) + self.path = None + return self + + def read(self): + self.readAxes() + self.readLabels() + self.readRules() + self.readVariableFonts() + self.readSources() + self.readInstances() + self.readLib() + + def readRules(self): + # we also need to read any conditions that are outside of a condition set. + rules = [] + rulesElement = self.root.find(".rules") + if rulesElement is not None: + processingValue = rulesElement.attrib.get("processing", "first") + if processingValue not in {"first", "last"}: + raise DesignSpaceDocumentError( + " processing attribute value is not valid: %r, " + "expected 'first' or 'last'" % processingValue + ) + self.documentObject.rulesProcessingLast = processingValue == "last" + for ruleElement in self.root.findall(".rules/rule"): + ruleObject = self.ruleDescriptorClass() + ruleName = ruleObject.name = ruleElement.attrib.get("name") + # read any stray conditions outside a condition set + externalConditions = self._readConditionElements( + ruleElement, + ruleName, + ) + if externalConditions: + ruleObject.conditionSets.append(externalConditions) + self.log.info( + "Found stray rule conditions outside a conditionset. " + "Wrapped them in a new conditionset." 
+ ) + # read the conditionsets + for conditionSetElement in ruleElement.findall(".conditionset"): + conditionSet = self._readConditionElements( + conditionSetElement, + ruleName, + ) + if conditionSet is not None: + ruleObject.conditionSets.append(conditionSet) + for subElement in ruleElement.findall(".sub"): + a = subElement.attrib["name"] + b = subElement.attrib["with"] + ruleObject.subs.append((a, b)) + rules.append(ruleObject) + self.documentObject.rules = rules + + def _readConditionElements(self, parentElement, ruleName=None): + cds = [] + for conditionElement in parentElement.findall(".condition"): + cd = {} + cdMin = conditionElement.attrib.get("minimum") + if cdMin is not None: + cd["minimum"] = float(cdMin) + else: + # will allow these to be None, assume axis.minimum + cd["minimum"] = None + cdMax = conditionElement.attrib.get("maximum") + if cdMax is not None: + cd["maximum"] = float(cdMax) + else: + # will allow these to be None, assume axis.maximum + cd["maximum"] = None + cd["name"] = conditionElement.attrib.get("name") + # # test for things + if cd.get("minimum") is None and cd.get("maximum") is None: + raise DesignSpaceDocumentError( + "condition missing required minimum or maximum in rule" + + (" '%s'" % ruleName if ruleName is not None else "") + ) + cds.append(cd) + return cds + + def readAxes(self): + # read the axes elements, including the warp map. + axesElement = self.root.find(".axes") + if axesElement is not None and "elidedfallbackname" in axesElement.attrib: + self.documentObject.elidedFallbackName = axesElement.attrib[ + "elidedfallbackname" + ] + axisElements = self.root.findall(".axes/axis") + if not axisElements: + return + for axisElement in axisElements: + if ( + self.documentObject.formatTuple >= (5, 0) + and "values" in axisElement.attrib + ): + axisObject = self.discreteAxisDescriptorClass() + axisObject.values = [ + float(s) for s in axisElement.attrib["values"].split(" ") + ] + else: + axisObject = self.axisDescriptorClass() + axisObject.minimum = float(axisElement.attrib.get("minimum")) + axisObject.maximum = float(axisElement.attrib.get("maximum")) + axisObject.default = float(axisElement.attrib.get("default")) + axisObject.name = axisElement.attrib.get("name") + if axisElement.attrib.get("hidden", False): + axisObject.hidden = True + axisObject.tag = axisElement.attrib.get("tag") + for mapElement in axisElement.findall("map"): + a = float(mapElement.attrib["input"]) + b = float(mapElement.attrib["output"]) + axisObject.map.append((a, b)) + for labelNameElement in axisElement.findall("labelname"): + # Note: elementtree reads the "xml:lang" attribute name as + # '{http://www.w3.org/XML/1998/namespace}lang' + for key, lang in labelNameElement.items(): + if key == XML_LANG: + axisObject.labelNames[lang] = tostr(labelNameElement.text) + labelElement = axisElement.find(".labels") + if labelElement is not None: + if "ordering" in labelElement.attrib: + axisObject.axisOrdering = int(labelElement.attrib["ordering"]) + for label in labelElement.findall(".label"): + axisObject.axisLabels.append(self.readAxisLabel(label)) + self.documentObject.axes.append(axisObject) + self.axisDefaults[axisObject.name] = axisObject.default + + self.documentObject.axisMappings = [] + for mappingsElement in self.root.findall(".axes/mappings"): + groupDescription = mappingsElement.attrib.get("description") + for mappingElement in mappingsElement.findall("mapping"): + description = mappingElement.attrib.get("description") + inputElement = mappingElement.find("input") + 
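# gather the dimension values recorded under <input> and <output>
+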
outputElement = mappingElement.find("output") + inputLoc = {} + outputLoc = {} + for dimElement in inputElement.findall(".dimension"): + name = dimElement.attrib["name"] + value = float(dimElement.attrib["xvalue"]) + inputLoc[name] = value + for dimElement in outputElement.findall(".dimension"): + name = dimElement.attrib["name"] + value = float(dimElement.attrib["xvalue"]) + outputLoc[name] = value + axisMappingObject = self.axisMappingDescriptorClass( + inputLocation=inputLoc, + outputLocation=outputLoc, + description=description, + groupDescription=groupDescription, + ) + self.documentObject.axisMappings.append(axisMappingObject) + + def readAxisLabel(self, element: ET.Element): + xml_attrs = { + "userminimum", + "uservalue", + "usermaximum", + "name", + "elidable", + "oldersibling", + "linkeduservalue", + } + unknown_attrs = set(element.attrib) - xml_attrs + if unknown_attrs: + raise DesignSpaceDocumentError( + f"label element contains unknown attributes: {', '.join(unknown_attrs)}" + ) + + name = element.get("name") + if name is None: + raise DesignSpaceDocumentError("label element must have a name attribute.") + valueStr = element.get("uservalue") + if valueStr is None: + raise DesignSpaceDocumentError( + "label element must have a uservalue attribute." + ) + value = float(valueStr) + minimumStr = element.get("userminimum") + minimum = float(minimumStr) if minimumStr is not None else None + maximumStr = element.get("usermaximum") + maximum = float(maximumStr) if maximumStr is not None else None + linkedValueStr = element.get("linkeduservalue") + linkedValue = float(linkedValueStr) if linkedValueStr is not None else None + elidable = True if element.get("elidable") == "true" else False + olderSibling = True if element.get("oldersibling") == "true" else False + labelNames = { + lang: label_name.text or "" + for label_name in element.findall("labelname") + for attr, lang in label_name.items() + if attr == XML_LANG + # Note: elementtree reads the "xml:lang" attribute name as + # '{http://www.w3.org/XML/1998/namespace}lang' + } + return self.axisLabelDescriptorClass( + name=name, + userValue=value, + userMinimum=minimum, + userMaximum=maximum, + elidable=elidable, + olderSibling=olderSibling, + linkedUserValue=linkedValue, + labelNames=labelNames, + ) + + def readLabels(self): + if self.documentObject.formatTuple < (5, 0): + return + + xml_attrs = {"name", "elidable", "oldersibling"} + for labelElement in self.root.findall(".labels/label"): + unknown_attrs = set(labelElement.attrib) - xml_attrs + if unknown_attrs: + raise DesignSpaceDocumentError( + f"Label element contains unknown attributes: {', '.join(unknown_attrs)}" + ) + + name = labelElement.get("name") + if name is None: + raise DesignSpaceDocumentError( + "label element must have a name attribute." + ) + designLocation, userLocation = self.locationFromElement(labelElement) + if designLocation: + raise DesignSpaceDocumentError( + f'